hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a06276abf091c4bc563e75db3381f84b36c4772
| 1,239
|
py
|
Python
|
lib/spack/spack/test/cmd/debug.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-06-25T15:25:29.000Z
|
2020-06-25T15:25:29.000Z
|
lib/spack/spack/test/cmd/debug.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2018-07-06T19:11:46.000Z
|
2018-07-06T19:12:28.000Z
|
lib/spack/spack/test/cmd/debug.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-09-21T14:35:49.000Z
|
2020-09-21T14:35:49.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import os
import os.path
from spack.main import SpackCommand
from spack.util.executable import which
debug = SpackCommand('debug')


@pytest.mark.db
def test_create_db_tarball(tmpdir, database):
    """`spack debug create-db-tarball` must produce an archive containing
    the DB index plus one spec.yaml per non-external installed spec."""
    with tmpdir.as_cwd():
        debug('create-db-tarball')

        # Skip dotfiles (e.g. coverage artifacts) and take the first
        # regular entry, which is the archive the command just created.
        entries = os.listdir(os.getcwd())
        archive = next(e for e in entries if not e.startswith('.'))
        assert os.path.exists(archive)

        # List the archive members with tar to inspect its contents.
        listing = which('tar')('tzf', archive, output=str)

        # The database index must be archived.
        assert 'index.json' in listing

        # Every non-external installed spec contributes a spec.yaml.
        for spec in database.query():
            if spec.external:
                # externals won't have a spec.yaml
                continue
            assert '%s/.spack/spec.yaml' % spec.dag_hash() in listing
| 28.159091
| 76
| 0.656981
|
4a062778bd5b0b4f43674622bfe12ec5a05c8b38
| 15,803
|
py
|
Python
|
msaf/features.py
|
Slliss/msaf
|
377b8a0423704029e49bfd0c60658955733096af
|
[
"MIT"
] | null | null | null |
msaf/features.py
|
Slliss/msaf
|
377b8a0423704029e49bfd0c60658955733096af
|
[
"MIT"
] | null | null | null |
msaf/features.py
|
Slliss/msaf
|
377b8a0423704029e49bfd0c60658955733096af
|
[
"MIT"
] | null | null | null |
"""
Each feature must inherit from the base class :class:`msaf.base.Features` to be
included in the whole framework.
Here is a list of all the available features:
.. autosummary::
:toctree: generated/
CQT
MFCC
PCP
Tonnetz
Tempogram
Features
"""
from builtins import super
import librosa
import numpy as np
# Local stuff
from msaf import config
from msaf.base import Features
from msaf.exceptions import FeatureParamsError
class CQT(Features):
    """Constant-Q Transform features.

    These features contain both harmonic and timbral content of the given
    audio signal.
    """

    def __init__(self, file_struct, feat_type, sr=config.sample_rate,
                 hop_length=config.hop_size, n_bins=config.cqt.bins,
                 norm=config.cqt.norm, filter_scale=config.cqt.filter_scale,
                 ref_power=config.cqt.ref_power):
        """Constructor of the class.

        Parameters
        ----------
        file_struct: `msaf.input_output.FileStruct`
            Object containing the file paths from where to extract/read
            the features.
        feat_type: `FeatureTypes`
            Enum containing the type of features.
        sr: int > 0
            Sampling rate for the analysis.
        hop_length: int > 0
            Hop size in frames for the analysis.
        n_bins: int > 0
            Number of frequency bins for the CQT.
        norm: float
            Type of norm to use for basis function normalization.
        filter_scale: float
            The scale of the filter for the CQT.
        ref_power: str
            The reference power for logarithmic scaling
            ("max", "min" or "median").
            See `configdefaults.py` for the possible values.

        Raises
        ------
        FeatureParamsError
            If `ref_power` is not one of the accepted names.
        """
        # Init the parent
        super().__init__(file_struct=file_struct, sr=sr, hop_length=hop_length,
                         feat_type=feat_type)
        # Init the CQT parameters
        self.n_bins = n_bins
        self.norm = norm
        self.filter_scale = filter_scale
        # Map the ref_power name to the numpy reduction used as dB reference.
        if ref_power == "max":
            self.ref_power = np.max
        elif ref_power == "min":
            self.ref_power = np.min
        elif ref_power == "median":
            self.ref_power = np.median
        else:
            raise FeatureParamsError("Wrong value for ref_power")

    @classmethod
    def get_id(cls):
        """Identifier of these features."""
        # Fixed: a classmethod receives the class; name it `cls`, not `self`.
        return "cqt"

    def compute_features(self):
        """Actual implementation of the features.

        Returns
        -------
        cqt: np.array(N, F)
            The features, each row representing a feature vector for a given
            time frame/beat.
        """
        # Power spectrogram of the constant-Q transform.
        linear_cqt = np.abs(librosa.cqt(
            self._audio, sr=self.sr, hop_length=self.hop_length,
            n_bins=self.n_bins, norm=self.norm, filter_scale=self.filter_scale)
        ) ** 2
        # Log-compress and transpose to (frames, bins).
        cqt = librosa.amplitude_to_db(linear_cqt, ref=self.ref_power).T
        return cqt
class MFCC(Features):
    """Mel-Frequency Cepstral Coefficient features.

    The MFCCs contain timbral content of a given audio signal.
    """

    def __init__(self, file_struct, feat_type, sr=config.sample_rate,
                 hop_length=config.hop_size, n_fft=config.n_fft,
                 n_mels=config.mfcc.n_mels, n_mfcc=config.mfcc.n_mfcc,
                 ref_power=config.mfcc.ref_power):
        """Constructor of the class.

        Parameters
        ----------
        file_struct: `msaf.input_output.FileStruct`
            Object containing the file paths from where to extract/read
            the features.
        feat_type: `FeatureTypes`
            Enum containing the type of features.
        sr: int > 0
            Sampling rate for the analysis.
        hop_length: int > 0
            Hop size in frames for the analysis.
        n_fft: int > 0
            Number of frames for the FFT.
        n_mels: int > 0
            Number of mel filters.
        n_mfcc: int > 0
            Number of mel coefficients.
        ref_power: str
            The reference power for logarithmic scaling
            ("max", "min" or "median").  (Docstring fixed: this is a string
            name, not a function; it is mapped to a numpy function below.)

        Raises
        ------
        FeatureParamsError
            If `ref_power` is not one of the accepted names.
        """
        # Init the parent
        super().__init__(file_struct=file_struct, sr=sr, hop_length=hop_length,
                         feat_type=feat_type)
        # Init the MFCC parameters
        self.n_fft = n_fft
        self.n_mels = n_mels
        self.n_mfcc = n_mfcc
        # Map the ref_power name to the numpy reduction used as dB reference.
        if ref_power == "max":
            self.ref_power = np.max
        elif ref_power == "min":
            self.ref_power = np.min
        elif ref_power == "median":
            self.ref_power = np.median
        else:
            raise FeatureParamsError("Wrong value for ref_power")

    @classmethod
    def get_id(cls):
        """Identifier of these features."""
        # Fixed: a classmethod receives the class; name it `cls`, not `self`.
        return "mfcc"

    def compute_features(self):
        """Actual implementation of the features.

        Returns
        -------
        mfcc: np.array(N, F)
            The features, each row representing a feature vector for a given
            time frame/beat.
        """
        S = librosa.feature.melspectrogram(self._audio,
                                           sr=self.sr,
                                           n_fft=self.n_fft,
                                           hop_length=self.hop_length,
                                           n_mels=self.n_mels)
        # Log-compress the mel spectrogram before taking the DCT.
        log_S = librosa.amplitude_to_db(S, ref=self.ref_power)
        mfcc = librosa.feature.mfcc(S=log_S, n_mfcc=self.n_mfcc).T
        return mfcc
class PCP(Features):
    """Pitch Class Profile features.

    The PCPs contain harmonic content of a given audio signal.
    """

    def __init__(self, file_struct, feat_type, sr=config.sample_rate,
                 hop_length=config.hop_size, n_bins=config.pcp.bins,
                 norm=config.pcp.norm, f_min=config.pcp.f_min,
                 n_octaves=config.pcp.n_octaves):
        """Constructor of the class.

        Parameters
        ----------
        file_struct: `msaf.input_output.FileStruct`
            Object containing the file paths from where to extract/read
            the features.
        feat_type: `FeatureTypes`
            Enum containing the type of features.
        sr: int > 0
            Sampling rate for the analysis.
        hop_length: int > 0
            Hop size in frames for the analysis.
        n_bins: int > 0
            Number of bins for the CQT computation.
        norm: int > 0
            Normalization parameter.
        f_min: float > 0
            Minimum frequency.
        n_octaves: int > 0
            Number of octaves.
        """
        # Init the parent
        super().__init__(file_struct=file_struct, sr=sr, hop_length=hop_length,
                         feat_type=feat_type)
        # Init the PCP parameters
        self.n_bins = n_bins
        self.norm = norm
        self.f_min = f_min
        self.n_octaves = n_octaves

    @classmethod
    def get_id(cls):
        """Identifier of these features."""
        # Fixed: a classmethod receives the class; name it `cls`, not `self`.
        return "pcp"

    def compute_features(self):
        """Actual implementation of the features.

        Returns
        -------
        pcp: np.array(N, F)
            The features, each row representing a feature vector for a given
            time frame/beat.
        """
        # Use only the harmonic component to reduce percussive smearing.
        audio_harmonic, _ = self.compute_HPSS()
        pcp_cqt = np.abs(librosa.hybrid_cqt(audio_harmonic,
                                            sr=self.sr,
                                            hop_length=self.hop_length,
                                            n_bins=self.n_bins,
                                            norm=self.norm,
                                            fmin=self.f_min)) ** 2
        # Fold the CQT bins into 12 pitch classes.
        pcp = librosa.feature.chroma_cqt(C=pcp_cqt,
                                         sr=self.sr,
                                         hop_length=self.hop_length,
                                         n_octaves=self.n_octaves,
                                         fmin=self.f_min).T
        return pcp
class Tonnetz(Features):
    """Tonal Centroid (Tonnetz) features.

    The Tonal Centroids contain harmonic content of a given audio signal.
    """

    def __init__(self, file_struct, feat_type, sr=config.sample_rate,
                 hop_length=config.hop_size, n_bins=config.tonnetz.bins,
                 norm=config.tonnetz.norm, f_min=config.tonnetz.f_min,
                 n_octaves=config.tonnetz.n_octaves):
        """Constructor of the class.

        Parameters
        ----------
        file_struct: `msaf.input_output.FileStruct`
            Object containing the file paths from where to extract/read
            the features.
        feat_type: `FeatureTypes`
            Enum containing the type of features.
        sr: int > 0
            Sampling rate for the analysis.
        hop_length: int > 0
            Hop size in frames for the analysis.
        n_bins: int > 0
            Number of bins for the CQT computation.
        norm: int > 0
            Normalization parameter.
        f_min: float > 0
            Minimum frequency.
        n_octaves: int > 0
            Number of octaves.
        """
        # Init the parent
        super().__init__(file_struct=file_struct, sr=sr, hop_length=hop_length,
                         feat_type=feat_type)
        # Init the local parameters (forwarded to the underlying PCP).
        self.n_bins = n_bins
        self.norm = norm
        self.f_min = f_min
        self.n_octaves = n_octaves

    @classmethod
    def get_id(cls):
        """Identifier of these features."""
        # Fixed: a classmethod receives the class; name it `cls`, not `self`.
        return "tonnetz"

    def compute_features(self):
        """Actual implementation of the features.

        Returns
        -------
        tonnetz: np.array(N, F)
            The features, each row representing a feature vector for a given
            time frame/beat.
        """
        # Tonnetz is derived from the chroma (PCP) of the same audio.
        pcp = PCP(self.file_struct, self.feat_type, self.sr, self.hop_length,
                  self.n_bins, self.norm, self.f_min, self.n_octaves).features
        tonnetz = librosa.feature.tonnetz(chroma=pcp.T).T
        return tonnetz
class Tempogram(Features):
    """Tempogram features.

    The Tempogram contains rhythmic content of a given audio signal.
    """

    def __init__(self, file_struct, feat_type, sr=config.sample_rate,
                 hop_length=config.hop_size,
                 win_length=config.tempogram.win_length):
        """Constructor of the class.

        Parameters
        ----------
        file_struct: `msaf.input_output.FileStruct`
            Object containing the file paths from where to extract/read
            the features.
        feat_type: `FeatureTypes`
            Enum containing the type of features.
        sr: int > 0
            Sampling rate for the analysis.
        hop_length: int > 0
            Hop size in frames for the analysis.
        win_length: int > 0
            The size of the window for the tempogram.
        """
        # Init the parent
        super().__init__(file_struct=file_struct, sr=sr, hop_length=hop_length,
                         feat_type=feat_type)
        # Init the local parameters
        self.win_length = win_length

    @classmethod
    def get_id(cls):
        """Identifier of these features."""
        # Fixed: a classmethod receives the class; name it `cls`, not `self`.
        return "tempogram"

    def compute_features(self):
        """Actual implementation of the features.

        Returns
        -------
        tempogram: np.array(N, F)
            The features, each row representing a feature vector for a given
            time frame/beat.
        """
        return librosa.feature.tempogram(self._audio, sr=self.sr,
                                         hop_length=self.hop_length,
                                         win_length=self.win_length).T
class MultiFeature(Features):
    """Stacked multi-feature representation.

    Concatenates, frame-wise, STFT chroma, CQT, MFCC and zero-crossing-rate
    features of a given audio signal into a single matrix.
    (Docstring fixed: the previous one was copy-pasted from MFCC.)
    """

    def __init__(self, file_struct, feat_type, sr=config.sample_rate,
                 hop_length=config.hop_size, n_fft=config.n_fft,
                 n_mels=config.mfcc.n_mels, n_mfcc=config.mfcc.n_mfcc,
                 ref_power=config.mfcc.ref_power,
                 n_bins=config.cqt.bins,
                 norm=config.cqt.norm, filter_scale=config.cqt.filter_scale):
        """Constructor of the class.

        Parameters
        ----------
        file_struct: `msaf.input_output.FileStruct`
            Object containing the file paths from where to extract/read
            the features.
        feat_type: `FeatureTypes`
            Enum containing the type of features.
        sr: int > 0
            Sampling rate for the analysis.
        hop_length: int > 0
            Hop size in frames for the analysis.
        n_fft: int > 0
            Number of frames for the FFT.
        n_mels: int > 0
            Number of mel filters.
        n_mfcc: int > 0
            Number of mel coefficients.
        ref_power: str
            The reference power for logarithmic scaling
            ("max", "min" or "median").
        n_bins: int > 0
            Number of frequency bins for the CQT.
        norm: float
            Type of norm to use for CQT basis function normalization.
        filter_scale: float
            The scale of the filter for the CQT.

        Raises
        ------
        FeatureParamsError
            If `ref_power` is not one of the accepted names.
        """
        # Init the parent
        super().__init__(file_struct=file_struct, sr=sr, hop_length=hop_length,
                         feat_type=feat_type)
        # Init the MFCC parameters
        self.n_fft = n_fft
        self.n_mels = n_mels
        self.n_mfcc = n_mfcc
        # Init the CQT parameters
        self.n_bins = n_bins
        self.norm = norm
        self.filter_scale = filter_scale
        # Map the ref_power name to the numpy reduction used as dB reference.
        if ref_power == "max":
            self.ref_power = np.max
        elif ref_power == "min":
            self.ref_power = np.min
        elif ref_power == "median":
            self.ref_power = np.median
        else:
            raise FeatureParamsError("Wrong value for ref_power")

    @classmethod
    def get_id(cls):
        """Identifier of these features."""
        # Fixed: a classmethod receives the class; name it `cls`, not `self`.
        return "multiFeature"

    def compute_features(self):
        """Actual implementation of the features.

        Returns
        -------
        full_features: np.array(N, F)
            Frame-wise concatenation of chroma, CQT, MFCC and
            zero-crossing-rate features, each row one time frame/beat.
        """
        # MFCCs from a log-compressed mel spectrogram.
        S = librosa.feature.melspectrogram(self._audio,
                                           sr=self.sr,
                                           n_fft=self.n_fft,
                                           hop_length=self.hop_length,
                                           n_mels=self.n_mels)
        log_S = librosa.amplitude_to_db(S, ref=self.ref_power)
        mfccs = librosa.feature.mfcc(S=log_S, n_mfcc=self.n_mfcc).T
        # 12-bin STFT chroma, transposed to (frames, bins).
        chroma_stft = librosa.feature.chroma_stft(self._audio,
                                                  sr=self.sr,
                                                  n_fft=self.n_fft,
                                                  hop_length=self.hop_length,
                                                  n_chroma=12)
        chroma_stft = np.array(chroma_stft, dtype='float64').transpose()
        # Frame-wise zero-crossing rate (one column).
        zero_crossing_rate = librosa.feature.zero_crossing_rate(
            self._audio, hop_length=self.hop_length)
        zero_crossing_rate = np.array(
            zero_crossing_rate, dtype='float64').transpose()
        # Log-compressed power CQT, transposed to (frames, bins).
        linear_cqt = np.abs(librosa.cqt(
            self._audio, sr=self.sr, hop_length=self.hop_length,
            n_bins=self.n_bins, norm=self.norm, filter_scale=self.filter_scale)
        ) ** 2
        cqt = librosa.amplitude_to_db(linear_cqt, ref=self.ref_power).T
        # Concatenate along the feature axis; all sub-features share the same
        # hop length, so the frame counts line up.
        # (Removed a leftover debug print of the resulting shape.)
        full_features = np.concatenate(
            (chroma_stft, cqt, mfccs, zero_crossing_rate), axis=1)
        return full_features
| 35.672686
| 103
| 0.567361
|
4a06286d326b041d0e4ec5849ce531ede1ab5054
| 1,058
|
py
|
Python
|
week5/main.py
|
csMOOC/Stanford.Cryptography
|
13bd0e7188a7aded37a152378b6b1238736eaade
|
[
"MIT"
] | 1
|
2017-11-18T15:34:44.000Z
|
2017-11-18T15:34:44.000Z
|
week5/main.py
|
csMOOC/Stanford.Cryptography
|
13bd0e7188a7aded37a152378b6b1238736eaade
|
[
"MIT"
] | null | null | null |
week5/main.py
|
csMOOC/Stanford.Cryptography
|
13bd0e7188a7aded37a152378b6b1238736eaade
|
[
"MIT"
] | null | null | null |
from numbthy import *
# Group parameters for the discrete-log challenge: find x with
# g**x == h (mod p).
p = 13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084171
g = 11717829880366207009516117596335367088558084999998952205599979459063929499736583746670572176471460312928594829675428279466566527115212748467589894601965568
h = 3239475104050450443565264378728065788649097520952449527834792452971981976143292558073856937958553180532878928001494706097394108577585732452307673444020333
# Small debug instance kept for reference:
#p = 1073676287
#g=1010343267
#h=857348958
#B = 2**10
# Meet-in-the-middle block size: search x = B*i + j with 1 <= i, j < B.
B = 2**20
gb = powmod(g, B, p)  # g**B mod p, the "giant step" base
def modinv(a, m):
    """Return the inverse of a modulo m, or None when gcd(a, m) != 1."""
    g, x, _ = xgcd(a, m)
    if g == 1:
        return x % m
    # No inverse exists when a and m are not coprime.
    return None
# Meet-in-the-middle discrete log: a match gb**i == h * g**(-j) (mod p)
# implies g**(B*i + j) == h, i.e. x = B*i + j.
m = dict()
print "BEGIN BUILD TABLE"
# Baby steps: table mapping h * g**(-j) mod p -> j for all j in [1, 2**20).
for i in range(1, 2**20):
    tmp = powmod(g, i, p)
    tmpi = modinv(tmp, p)  # g**(-i) mod p
    m[ (h*tmpi) % p] = i
print "BUILD TABLE FINISH"
# Giant steps: probe gb**i against the table; on a hit print x = B*i + j.
for i in range(1, 2**20):
    tmp = powmod(gb, i, p)
    if tmp in m:
        print B*i+m[tmp]
        print i
        break;
print "RUN FINISH"
| 24.045455
| 159
| 0.73535
|
4a062883f5c9737f5c71c1fdaed91680571e3daa
| 21,669
|
py
|
Python
|
promptwithoptions/promptwithoptions.py
|
silkyanteater/promptwithoptions
|
a7c2d0b6936d90d4a6eaa1e56f179e58e26988bb
|
[
"MIT"
] | null | null | null |
promptwithoptions/promptwithoptions.py
|
silkyanteater/promptwithoptions
|
a7c2d0b6936d90d4a6eaa1e56f179e58e26988bb
|
[
"MIT"
] | null | null | null |
promptwithoptions/promptwithoptions.py
|
silkyanteater/promptwithoptions
|
a7c2d0b6936d90d4a6eaa1e56f179e58e26988bb
|
[
"MIT"
] | null | null | null |
import sys
import shlex
from collections.abc import Iterable
# TODO: add a field type that is free text but with options to make it more convenient

# Every keyword argument accepted by promptwithoptions() and
# set_prompt_defaults(); resolve_defaults() iterates this tuple to merge
# per-call values with the stored defaults.
ARGUMENT_NAMES = (
    "prompt",
    "options",
    "data_type",
    "default",
    "allow_empty",
    "allow_multiple",
    "allow_repetitive",
    "show_confirmation",
    "hide_key",
    "hide_questionmark",
    "hide_mandatory_sign",
    "hide_multiple_choice_sign",
    "no_interaction",
    "options_line_color",
    "options_number_color",
    "input_line_color",
    "confirm_line_color",
)

# Module-wide defaults, populated by set_prompt_defaults() and cleared by
# reset_prompt_defaults().
DEFAULTS = dict()
def cformat(text, color=None):
    """Wrap *text* in the given ANSI color code; return it unchanged when
    color is None."""
    if color is None:
        return text
    return f"{color}{text}\u001b[0m"
def cprint(text, color=None):
    """Print *text*, colorized through cformat when a color is given."""
    print(cformat(text, color))
def cinput(text, color=None):
    """Read a line from the user, showing a colorized prompt."""
    return input(cformat(text, color))
def is_ref_in_option(option, ref):
    """Return True when str(ref) equals any field of *option* (fields are
    compared as strings)."""
    needle = str(ref)
    return any(str(field) == needle for field in option)
def get_option(options, ref):
    """Return the option matching *ref*, or None when nothing matches.

    *ref* may be a 1-based index into *options*, or a value equal
    (string-compared) to any field of an option.
    """
    try:
        ref_int = int(ref)
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # not swallowed; int() failures raise TypeError or ValueError.
    except (TypeError, ValueError):
        ref_int = None
    if ref_int is not None and ref_int >= 1 and ref_int <= len(options):
        return options[ref_int - 1]
    # Fall back to a field-by-field string comparison.
    needle = str(ref)
    for option in options:
        if needle in tuple(str(field) for field in option):
            return option
def get_formatted_prompt(
    prompt,
    options,
    data_type,
    default,
    allow_empty,
    allow_multiple,
    hide_key,
    hide_questionmark,
    hide_mandatory_sign,
    hide_multiple_choice_sign,
    input_line_color,
):
    """Build the colorized input-line text shown before the cursor.

    The line is assembled as: prompt, optional '?', '*' for mandatory
    fields, '…' for multiple-choice fields, then a parenthesised default —
    'y/n'-style for bool prompts, option/value text otherwise.
    """
    formatted_prompt = f"{prompt}%s" % ("" if hide_questionmark is True else "?",)
    # '*' marks a mandatory answer, '…' a multiple-choice field.
    if allow_empty is not True and hide_mandatory_sign is not True:
        formatted_prompt += "*"
    if allow_multiple is True and hide_multiple_choice_sign is not True:
        formatted_prompt += "…"
    formatted_prompt += " "
    if data_type is bool:
        # Normalise the default(s) into 'Y'/'N' tokens (None when unknown).
        if isinstance(default, Iterable) and not isinstance(default, str):
            normalised_bool_default = tuple(map(normalise_value_to_YN, default))
        else:
            normalised_bool_default = (str(default),)
        # Each item may itself be a comma-separated list; split every item
        # and re-normalise the pieces.
        new_normalised_bool_default = tuple()
        for response_item in normalised_bool_default:
            new_normalised_bool_default += tuple(
                normalise_value_to_YN(x)
                for x in split_escaped_comma_separated_string(response_item)
            )
        normalised_bool_default = new_normalised_bool_default
        # A single default capitalises the corresponding letter (Y/n, y/N).
        if len(normalised_bool_default) == 1:
            if normalised_bool_default[0] == "Y":
                bool_choice = "Y/n"
            elif normalised_bool_default[0] == "N":
                bool_choice = "y/N"
            else:
                bool_choice = "y/n"
        else:
            bool_choice = "y/n"
        formatted_prompt = f"{formatted_prompt}({bool_choice}) "
        # Multiple bool defaults are additionally echoed, e.g. '(Y,N)'.
        if len(normalised_bool_default) > 1:
            formatted_prompt = (
                f"{formatted_prompt}({','.join(normalised_bool_default)}) "
            )
    else:
        if default is not None:
            if isinstance(default, Iterable) and not isinstance(default, str):
                if options is None:
                    # Free-text multi-default: show raw values, '' for empty.
                    formatted_prompt = f"{formatted_prompt}(%s) " % (
                        ", ".join(str(x) if x != "" else "''" for x in default),
                    )
                else:
                    # Option-backed multi-default: show each option's text.
                    formatted_prompt = f"{formatted_prompt}(%s) " % (
                        ", ".join(
                            get_option_str(
                                get_option(options, str(x)), hide_key=hide_key
                            )
                            for x in default
                        ),
                    )
            else:
                if options is None:
                    formatted_prompt = f"{formatted_prompt}(%s) " % (
                        default if default != "" else "''",
                    )
                else:
                    formatted_prompt = f"{formatted_prompt}(%s) " % (
                        get_option_str(get_option(options, default), hide_key=hide_key),
                    )
    return cformat(formatted_prompt, input_line_color)
def get_option_str(option, hide_key=None):
    """Join an option's fields with ' - ', rendering empty fields as ''.

    When hide_key is True and the option has more than one field, the
    first (key) field is omitted.
    """
    fields = option[1:] if hide_key is True and len(option) > 1 else option
    return " - ".join("''" if field == "" else str(field) for field in fields)
def get_formatted_option(
    option, hide_key=None, options_line_color=None, options_number_color=None
):
    """Colorize a single option's display text.

    NOTE(review): options_number_color is accepted but unused here; it is
    kept for signature compatibility with callers.
    """
    text = get_option_str(option, hide_key=hide_key)
    return cformat(text, options_line_color)
def print_formatted_options(
    options, hide_key=None, options_line_color=None, options_number_color=None
):
    """Print the numbered option list, one option per line; no-op when
    options is None."""
    if options is None:
        return
    lines = list()
    for number, option in enumerate(options, start=1):
        prefix = cformat(str(number), options_number_color)
        # NOTE(review): the original forwards options_line_color as
        # options_number_color here (harmless: the callee ignores it);
        # preserved for identical behavior.
        body = get_formatted_option(
            option,
            hide_key=hide_key,
            options_line_color=options_line_color,
            options_number_color=options_line_color,
        )
        lines.append(prefix + " > " + body)
    print("\n".join(lines))
def print_formatted_confirmation(
    prompt, response, hide_questionmark, confirm_line_color
):
    """Echo the accepted answer after the prompt, colorized; empty answers
    are shown as ''."""
    shown = response if response != "" else "''"
    if hide_questionmark is True:
        # we suppose the user provides it so we're not adding ':'
        cprint(f"{prompt} %s" % (shown,), confirm_line_color)
    else:
        cprint(f"{prompt}: %s" % (shown,), confirm_line_color)
def normalise_value_to_YN(value):
    """Map truthy/yes-like values to 'Y', falsy/no-like to 'N', anything
    else to None."""
    if value is True:
        return "Y"
    if value is False:
        return "N"
    if isinstance(value, str):
        lowered = value.lower()
        if lowered in ("y", "yes"):
            return "Y"
        if lowered in ("n", "no"):
            return "N"
    return None
def normalise_options(options):
    """Return *options* as a list of tuples of strings.

    Dicts become (key, value) tuples; scalar entries become 1-tuples.
    None passes through unchanged.
    """
    if options is None:
        return None
    if isinstance(options, dict):
        options = tuple(options.items())
    normalised = list()
    for option in options:
        if isinstance(option, str) or not isinstance(option, Iterable):
            normalised.append((str(option),))
        else:
            normalised.append(tuple(str(field) for field in option))
    return normalised
def clear_back_last_input():
    """Move the cursor up one line and erase it (ANSI F/K escape codes),
    removing the user's last typed line from the terminal."""
    sys.stdout.write("\033[F\033[K")
def split_escaped_comma_separated_string(the_string):
    """Split on commas while honouring shlex quoting/escaping.

    Returns a tuple of stripped parts, or None when the string cannot be
    lexed (e.g. an unbalanced quote).
    """
    try:
        lexer = shlex.shlex(the_string, posix=True)
        lexer.whitespace = ","
        lexer.whitespace_split = True
        return tuple(part.strip() for part in lexer)
    except ValueError:
        return None
def resolve_defaults(locals, variable_names):
    """Merge call-site values with the module DEFAULTS.

    For each name: an explicit value wins; None falls back to DEFAULTS;
    the sentinel string "_None_" forces the result to None.

    NOTE(review): the parameter name `locals` shadows the builtin; kept
    as-is to preserve the public signature.
    """
    resolved = dict()
    for name in variable_names:
        value = locals[name]
        if value == "_None_":
            resolved[name] = None
        elif value is None:
            resolved[name] = DEFAULTS.get(name)
        else:
            resolved[name] = value
    return resolved
def validate_arguments(
    *,
    prompt=None,
    options=None,
    data_type=None,
    default=None,
    allow_empty=None,
    allow_multiple=None,
    allow_repetitive=None,
    show_confirmation=None,
    hide_key=None,
    hide_questionmark=None,
    hide_mandatory_sign=None,
    hide_multiple_choice_sign=None,
    no_interaction=None,
    options_line_color=None,
    options_number_color=None,
    input_line_color=None,
    confirm_line_color=None,
):
    """Validate a full set of prompt arguments.

    Raises TypeError on the first violated constraint; returns None when
    everything is consistent. Checks each flag/color argument's type and
    cross-checks options, data_type and default against each other.
    """
    if prompt is not None:
        if not isinstance(prompt, str):
            raise TypeError("prompt: string expected")
    if options is not None:
        if not isinstance(options, Iterable) or isinstance(options, str):
            raise TypeError("options: iterable expected")
        if len(options) != len(set(options)):
            raise TypeError("options: unique items expected")
    if data_type is not None:
        if not callable(data_type):
            raise TypeError("data_type: callable expected")
        # Bool prompts render their own y/n choices; explicit options clash.
        if data_type is bool and options is not None:
            raise TypeError("options: only None is accepted when data_type is bool")
    if data_type is not None and options is not None:
        # Every option key must be convertible by data_type.
        invalid_options = list()
        for option in normalise_options(options):
            try:
                data_type(option[0])
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            except Exception:
                invalid_options.append(option)
        if invalid_options:
            raise TypeError(
                f"options: data_type validation failed: {', '.join(get_option_str(o) for o in invalid_options)}"
            )
    if default is not None:
        # Normalise the default into a tuple of string parts first.
        if isinstance(default, str):
            if allow_multiple is True:
                # NOTE(review): returns None on unbalanced quotes, which
                # would make the len() checks below raise TypeError.
                default_parts = split_escaped_comma_separated_string(default)
            else:
                default_parts = (default,)
        elif isinstance(default, Iterable):
            default_parts = tuple(
                "Y" if part is True else "N" if part is False else str(part)
                for part in default
            )
        elif isinstance(default, bool):
            default_parts = (normalise_value_to_YN(default),)
        else:
            default_parts = (
                "Y" if default is True else "N" if default is False else str(default),
            )
        if allow_empty is not True and len(default_parts) == 0:
            raise TypeError(
                f"default: empty value is invalid when allow_empty is not True"
            )
        if allow_multiple is not True and len(default_parts) > 1:
            raise TypeError(
                f"default: multiple values found when allow_multiple is not True"
            )
        if allow_repetitive is not True:
            if options is not None:
                # Compare resolved options so an index and its option key
                # count as the same choice.
                normalised_default_parts = tuple(
                    get_option(normalise_options(options), part)
                    for part in default_parts
                )
                if len(normalised_default_parts) != len(set(normalised_default_parts)):
                    raise TypeError(
                        f"default: repetitive elements found when allow_repetitive is not True"
                    )
            else:
                if len(default_parts) != len(set(default_parts)):
                    raise TypeError(
                        f"default: repetitive elements found when allow_repetitive is not True"
                    )
        invalid_parts = list()
        if options is None:
            if data_type is not None:
                if data_type is bool:
                    for default_part in default_parts:
                        if normalise_value_to_YN(default_part) is None:
                            invalid_parts.append(default_part)
                else:
                    for default_part in default_parts:
                        try:
                            data_type(default_part)
                        # Narrowed from a bare `except:` (see above).
                        except Exception:
                            invalid_parts.append(default_part)
                if invalid_parts:
                    raise TypeError(
                        f"default: type of data_type expected, got {', '.join(invalid_parts)}"
                    )
        else:
            for default_part in default_parts:
                if get_option(normalise_options(options), default_part) is None:
                    invalid_parts.append(default_part)
            if invalid_parts:
                raise TypeError(
                    f"default: must be in options, got {', '.join(invalid_parts)}"
                )
    # Plain type checks on the boolean flags and color strings.
    if allow_empty is not None and not isinstance(allow_empty, bool):
        raise TypeError("allow_empty: bool expected")
    if allow_multiple is not None and not isinstance(allow_multiple, bool):
        raise TypeError("allow_multiple: bool expected")
    if allow_repetitive is not None and not isinstance(allow_repetitive, bool):
        raise TypeError("allow_repetitive: bool expected")
    if show_confirmation is not None and not isinstance(show_confirmation, bool):
        raise TypeError("show_confirmation: bool expected")
    if hide_key is not None and not isinstance(hide_key, bool):
        raise TypeError("hide_key: bool expected")
    if hide_questionmark is not None and not isinstance(hide_questionmark, bool):
        raise TypeError("hide_questionmark: bool expected")
    if hide_mandatory_sign is not None and not isinstance(hide_mandatory_sign, bool):
        raise TypeError("hide_mandatory_sign: bool expected")
    if hide_multiple_choice_sign is not None and not isinstance(
        hide_multiple_choice_sign, bool
    ):
        raise TypeError("hide_multiple_choice_sign: bool expected")
    if no_interaction is not None and not isinstance(no_interaction, bool):
        raise TypeError("no_interaction: bool expected")
    if options_line_color is not None and not isinstance(options_line_color, str):
        raise TypeError("options_line_color: str expected")
    if options_number_color is not None and not isinstance(options_number_color, str):
        raise TypeError("options_number_color: str expected")
    if input_line_color is not None and not isinstance(input_line_color, str):
        raise TypeError("input_line_color: str expected")
    if confirm_line_color is not None and not isinstance(confirm_line_color, str):
        raise TypeError("confirm_line_color: str expected")
def set_prompt_defaults(
    prompt=None,
    options=None,
    data_type=None,
    default=None,
    allow_empty=None,
    allow_multiple=None,
    allow_repetitive=None,
    show_confirmation=None,
    hide_key=None,
    hide_questionmark=None,
    hide_mandatory_sign=None,
    hide_multiple_choice_sign=None,
    no_interaction=None,
    options_line_color=None,
    options_number_color=None,
    input_line_color=None,
    confirm_line_color=None,
):
    """Store module-wide defaults for subsequent promptwithoptions() calls.

    Pass the string "_None_" for a parameter to reset its stored default
    back to None. Raises TypeError (via validate_arguments) when the
    resulting combination is invalid; DEFAULTS is only updated on success.
    """
    # locals() here captures exactly the keyword arguments declared above.
    _DEFAULTS = resolve_defaults(locals(), ARGUMENT_NAMES)
    validate_arguments(**_DEFAULTS)
    DEFAULTS.update(_DEFAULTS)
def reset_prompt_defaults():
    """Forget every default previously stored via set_prompt_defaults()."""
    DEFAULTS.clear()
def promptwithoptions(
    prompt=None,
    options=None,
    data_type=None,
    default=None,
    allow_empty=None,
    allow_multiple=None,
    allow_repetitive=None,
    show_confirmation=None,
    hide_key=None,
    hide_questionmark=None,
    hide_mandatory_sign=None,
    hide_multiple_choice_sign=None,
    no_interaction=None,
    options_line_color=None,
    options_number_color=None,
    input_line_color=None,
    confirm_line_color=None,
):
    """Prompt the user on the terminal and return the validated answer(s).

    Unset arguments fall back to the module defaults stored via
    set_prompt_defaults(). Invalid input erases the typed line and
    re-prompts until a valid answer (or the default) is entered.

    Returns a single value, or a tuple when allow_multiple is True; for
    option-backed prompts the returned value(s) are the matched options'
    first (key) fields.
    """
    # Merge call arguments with stored defaults, then validate them all.
    arguments = resolve_defaults(locals(), ARGUMENT_NAMES)
    validate_arguments(**arguments)
    prompt = arguments["prompt"]
    options = arguments["options"]
    data_type = arguments["data_type"]
    default = arguments["default"]
    allow_empty = arguments["allow_empty"]
    allow_multiple = arguments["allow_multiple"]
    allow_repetitive = arguments["allow_repetitive"]
    show_confirmation = arguments["show_confirmation"]
    hide_key = arguments["hide_key"]
    hide_questionmark = arguments["hide_questionmark"]
    hide_mandatory_sign = arguments["hide_mandatory_sign"]
    hide_multiple_choice_sign = arguments["hide_multiple_choice_sign"]
    no_interaction = arguments["no_interaction"]
    options_line_color = arguments["options_line_color"]
    options_number_color = arguments["options_number_color"]
    input_line_color = arguments["input_line_color"]
    confirm_line_color = arguments["confirm_line_color"]
    options = normalise_options(options)
    print_formatted_options(options, hide_key, options_line_color, options_number_color)
    response = None
    # Loop until `response` holds a validated tuple (or '' for an allowed
    # empty answer).
    while response is None:
        if no_interaction is True and default is not None:
            # Non-interactive mode: echo prompt + default and behave as if
            # the user pressed Enter (empty input picks up the default).
            print(
                get_formatted_prompt(
                    prompt,
                    options,
                    data_type,
                    default,
                    allow_empty,
                    allow_multiple,
                    hide_key,
                    hide_questionmark,
                    hide_mandatory_sign,
                    hide_multiple_choice_sign,
                    input_line_color,
                )
                + (str(default) if default != "" else "''")
            )
            response = ""
        else:
            response = input(
                get_formatted_prompt(
                    prompt,
                    options,
                    data_type,
                    default,
                    allow_empty,
                    allow_multiple,
                    hide_key,
                    hide_questionmark,
                    hide_mandatory_sign,
                    hide_multiple_choice_sign,
                    input_line_color,
                )
            )
        # Empty input with a default available: take the default.
        if response == "" and default is not None:
            if isinstance(default, Iterable) and not isinstance(default, str):
                default_response = tuple(default)
            else:
                default_response = (str(default),)
            new_default_response = tuple()
            if data_type is bool:
                new_default_response = tuple(
                    normalise_value_to_YN(x) for x in default_response
                )
            else:
                if allow_multiple is True:
                    # Each default item may itself be comma-separated.
                    for response_item in default_response:
                        new_default_response += split_escaped_comma_separated_string(
                            str(response_item)
                        )
                else:
                    new_default_response = default_response
            default_response = new_default_response
            if allow_multiple is not True and len(default_response) > 1:
                response = None
                clear_back_last_input()
                continue
            response = default_response or ""
            break
        # '' and '-' both mean "no answer".
        if response in ("", "-"):
            response = ""
            if allow_empty is True:
                break
        if response == "":
            # Empty answer not allowed: erase the line and re-prompt.
            response = None
            clear_back_last_input()
            continue
        # Split multi-answers on (escaped) commas; single answers become
        # 1-tuples so the rest of the loop handles one shape.
        if allow_multiple is True:
            response = split_escaped_comma_separated_string(response)
        else:
            response = (response,)
        if (
            response is None
            or len(response) == 0
            or (allow_multiple is not True and len(response) > 1)
        ):
            response = None
            clear_back_last_input()
            continue
        if data_type is bool:
            # Normalise every item to 'Y'/'N'; reject unknown values and
            # (unless allow_repetitive) duplicate answers.
            continue_loop = False
            normalised_response = list()
            for response_item in response:
                response_item_bool = normalise_value_to_YN(response_item)
                if response_item_bool is None:
                    continue_loop = True
                    break
                else:
                    if (
                        allow_repetitive is not True
                        and response_item_bool in normalised_response
                    ):
                        continue_loop = True
                        break
                    else:
                        normalised_response.append(response_item_bool)
            if continue_loop is True:
                response = None
                clear_back_last_input()
                continue
            else:
                response = tuple(normalised_response)
                break
        if options is None:
            # Free-text answer: only data_type conversion and duplicate
            # checks apply.
            if data_type is not None:
                try:
                    for response_item in response:
                        data_type(response_item)
                except:
                    response = None
                    clear_back_last_input()
                    continue
            if allow_repetitive is not True and len(response) != len(set(response)):
                response = None
                clear_back_last_input()
                continue
            break
        else:
            # Option-backed answer: every item must resolve to an option.
            if len(response) > 1 and allow_multiple is not True:
                response = None
                clear_back_last_input()
                continue
            response_options = list()
            for response_item in response:
                response_option = get_option(options, response_item)
                if response_option is None:
                    response = None
                    clear_back_last_input()
                    # NOTE(review): this `continue` only advances the inner
                    # for-loop; the outer while still re-prompts because
                    # `response` stays None at the loop condition.
                    continue
                else:
                    response_options.append(response_option)
            if allow_repetitive is not True and len(response_options) != len(
                set(response_options)
            ):
                response = None
                clear_back_last_input()
                continue
    # Build the value(s) to return and the confirmation string.
    if options is None:
        response_value = response
        if data_type is bool:
            response_value_str = ", ".join(
                "Yes" if v == "Y" else "No" if v == "N" else "N/A"
                for v in response_value
            )
        else:
            response_value_str = ", ".join(response_value)
    else:
        response_options = tuple(
            get_option(options, response_item) for response_item in response
        )
        # An option's first field is its canonical (key) value.
        response_value = tuple(
            response_option[0] for response_option in response_options
        )
        response_value_str = ", ".join(
            get_option_str(response_option, hide_key)
            for response_option in response_options
        )
    if show_confirmation is True:
        print_formatted_confirmation(
            prompt, response_value_str, hide_questionmark, confirm_line_color
        )
    # Unwrap single answers unless the caller asked for multiple values.
    if len(response_value) == 1 and allow_multiple is not True:
        response_value = response_value[0]
    return response_value
| 34.837621
| 112
| 0.582214
|
4a062944876b8fc55f977066984d8f2893f92029
| 15,355
|
py
|
Python
|
HW7/model_2.py
|
Pyrojewel-zard/ML
|
d8a11d893eed3e889b9af0d6aeb3ab08cd60d997
|
[
"MIT"
] | 5
|
2021-11-26T10:05:03.000Z
|
2022-03-17T11:45:46.000Z
|
HW7/model_2.py
|
Pyrojewel-zard/ML
|
d8a11d893eed3e889b9af0d6aeb3ab08cd60d997
|
[
"MIT"
] | null | null | null |
HW7/model_2.py
|
Pyrojewel-zard/ML
|
d8a11d893eed3e889b9af0d6aeb3ab08cd60d997
|
[
"MIT"
] | 1
|
2022-01-09T02:17:19.000Z
|
2022-01-09T02:17:19.000Z
|
# -*- coding: utf-8 -*-
"""「hw7_bert」的副本
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1X0HkHZ8MqyDiO1oIuduZ70PCIj-LQGbh
# **Homework 7 - Bert (Question Answering)**
If you have any questions, feel free to email us at ntu-ml-2021spring-ta@googlegroups.com
Slide: [Link](https://docs.google.com/presentation/d/1aQoWogAQo_xVJvMQMrGaYiWzuyfO0QyLLAhiMwFyS2w) Kaggle: [Link](https://www.kaggle.com/c/ml2021-spring-hw7) Data: [Link](https://drive.google.com/uc?id=1znKmX08v9Fygp-dgwo7BKiLIf2qL1FH1)
## Task description
- Chinese Extractive Question Answering
- Input: Paragraph + Question
- Output: Answer
- Objective: Learn how to fine tune a pretrained model on downstream task using transformers
- Todo
- Fine tune a pretrained chinese BERT model
- Change hyperparameters (e.g. doc_stride)
- Apply linear learning rate decay
- Try other pretrained models
- Improve preprocessing
- Improve postprocessing
- Training tips
- Automatic mixed precision
- Gradient accumulation
- Ensemble
- Estimated training time (tesla t4 with automatic mixed precision enabled)
- Simple: 8mins
- Medium: 8mins
- Strong: 25mins
- Boss: 2hrs
## Download Dataset
"""
# Download link 1
!gdown --id '1znKmX08v9Fygp-dgwo7BKiLIf2qL1FH1' --output hw7_data.zip
# Download Link 2 (if the above link fails)
# !gdown --id '1pOu3FdPdvzielUZyggeD7KDnVy9iW1uC' --output hw7_data.zip
!unzip -o hw7_data.zip
# For this HW, K80 < P4 < T4 < P100 <= T4(fp16) < V100
!nvidia-smi
"""## Install transformers
Documentation for the toolkit: https://huggingface.co/transformers/
"""
# You are allowed to change version of transformers or use other toolkits
!pip install transformers==4.5.0
"""## Import Packages"""
import json
import numpy as np
import random
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import AdamW, BertForQuestionAnswering, BertTokenizerFast
from tqdm.auto import tqdm
device = "cuda" if torch.cuda.is_available() else "cpu"
# Fix random seed for reproducibility
def same_seeds(seed):
    """Seed every RNG used in this notebook so runs are reproducible."""
    # Python and NumPy generators.
    random.seed(seed)
    np.random.seed(seed)
    # PyTorch CPU generator, plus every CUDA device when one is present.
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning for deterministic kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
same_seeds(0)
# Change "fp16_training" to True to support automatic mixed precision training (fp16)
fp16_training = True
if fp16_training:
!pip install accelerate==0.2.0
from accelerate import Accelerator
accelerator = Accelerator(fp16=True)
device = accelerator.device
# Documentation for the toolkit: https://huggingface.co/docs/accelerate/
"""## Load Model and Tokenizer
"""
from transformers import AutoModelForQuestionAnswering, BertTokenizer
from transformers import XLNetTokenizer,XLNetModel
# Large Chinese MRC RoBERTa checkpoint; AutoModelForQuestionAnswering attaches a QA head.
model = AutoModelForQuestionAnswering.from_pretrained("luhua/chinese_pretrain_mrc_roberta_wwm_ext_large").to(device)
# A fast (Rust) tokenizer is used; char_to_token() called later requires the fast variant.
tokenizer = BertTokenizerFast.from_pretrained("luhua/chinese_pretrain_mrc_roberta_wwm_ext_large")
# You can safely ignore the warning message (it pops up because new prediction heads for QA are initialized randomly)
"""## Read Data
- Training set: 26935 QA pairs
- Dev set: 3523 QA pairs
- Test set: 3492 QA pairs
- {train/dev/test}_questions:
- List of dicts with the following keys:
- id (int)
- paragraph_id (int)
- question_text (string)
- answer_text (string)
- answer_start (int)
- answer_end (int)
- {train/dev/test}_paragraphs:
- List of strings
- paragraph_ids in questions correspond to indexs in paragraphs
- A paragraph may be used by several questions
"""
def read_data(file):
    """Load one split of the QA dataset.

    Returns the ``(questions, paragraphs)`` pair parsed from the JSON file.
    """
    with open(file, encoding="utf-8") as fp:
        payload = json.load(fp)
    return payload["questions"], payload["paragraphs"]
# Load the three splits; each JSON file holds "questions" and "paragraphs".
train_questions, train_paragraphs = read_data("hw7_train.json")
dev_questions, dev_paragraphs = read_data("hw7_dev.json")
test_questions, test_paragraphs = read_data("hw7_test.json")
"""## Tokenize Data"""
# Tokenize questions and paragraphs separately
# 「add_special_tokens」 is set to False since special tokens will be added when tokenized questions and paragraphs are combined in dataset __getitem__
train_questions_tokenized = tokenizer([train_question["question_text"] for train_question in train_questions], add_special_tokens=False)
dev_questions_tokenized = tokenizer([dev_question["question_text"] for dev_question in dev_questions], add_special_tokens=False)
test_questions_tokenized = tokenizer([test_question["question_text"] for test_question in test_questions], add_special_tokens=False)
# Paragraphs are tokenized once and shared by every question that references them.
train_paragraphs_tokenized = tokenizer(train_paragraphs, add_special_tokens=False)
dev_paragraphs_tokenized = tokenizer(dev_paragraphs, add_special_tokens=False)
test_paragraphs_tokenized = tokenizer(test_paragraphs, add_special_tokens=False)
# You can safely ignore the warning message as tokenized sequences will be further processed in dataset __getitem__ before passing to model
"""## Dataset and Dataloader"""
class QA_Dataset(Dataset):
    """Wraps tokenized questions/paragraphs and emits model-ready windows.

    Training items are a single window sliced around the answer (with random
    position jitter); dev/test items are every window of the paragraph,
    stepped by ``doc_stride``.
    """
    def __init__(self, split, questions, tokenized_questions, tokenized_paragraphs):
        # split: "train", "dev" or "test" -- selects __getitem__ behaviour.
        self.split = split
        self.questions = questions
        self.tokenized_questions = tokenized_questions
        self.tokenized_paragraphs = tokenized_paragraphs
        # Hard caps on question/paragraph token counts per window.
        self.max_question_len = 40
        self.max_paragraph_len = 150
        ##### TODO: Change value of doc_stride #####
        # Step between consecutive eval windows; < max_paragraph_len means
        # windows overlap.
        self.doc_stride = 80
        # Input sequence length = [CLS] + question + [SEP] + paragraph + [SEP]
        self.max_seq_len = 1 + self.max_question_len + 1 + self.max_paragraph_len + 1
    def __len__(self):
        return len(self.questions)
    def __getitem__(self, idx):
        question = self.questions[idx]
        tokenized_question = self.tokenized_questions[idx]
        tokenized_paragraph = self.tokenized_paragraphs[question["paragraph_id"]]
        x = random.randint(-30,30) # random offset added to mid (window-position augmentation)
        ##### TODO: Preprocessing #####
        # Hint: How to prevent model from learning something it should not learn
        if self.split == "train":
            # Convert answer's start/end positions in paragraph_text to start/end positions in tokenized_paragraph
            answer_start_token = tokenized_paragraph.char_to_token(question["answer_start"])
            answer_end_token = tokenized_paragraph.char_to_token(question["answer_end"])
            # A single window is obtained by slicing the portion of paragraph containing the answer
            # NOTE(review): with |x| up to 30 the shifted window can leave part
            # of the answer outside it -- presumably intentional jitter, but
            # worth confirming the labels stay inside the window.
            mid = ((answer_start_token + answer_end_token) // 2) + x
            paragraph_start = max(0, min(mid - self.max_paragraph_len // 2, len(tokenized_paragraph) - self.max_paragraph_len))
            paragraph_end = paragraph_start + self.max_paragraph_len
            # Slice question/paragraph and add special tokens (101: CLS, 102: SEP)
            input_ids_question = [101] + tokenized_question.ids[:self.max_question_len] + [102]
            input_ids_paragraph = tokenized_paragraph.ids[paragraph_start : paragraph_end] + [102]
            # Convert answer's start/end positions in tokenized_paragraph to start/end positions in the window
            answer_start_token += len(input_ids_question) - paragraph_start
            answer_end_token += len(input_ids_question) - paragraph_start
            # Pad sequence and obtain inputs to model
            input_ids, token_type_ids, attention_mask = self.padding(input_ids_question, input_ids_paragraph)
            return torch.tensor(input_ids), torch.tensor(token_type_ids), torch.tensor(attention_mask), answer_start_token, answer_end_token
        # Validation/Testing
        else:
            input_ids_list, token_type_ids_list, attention_mask_list = [], [], []
            # Paragraph is split into several windows, each with start positions separated by step "doc_stride"
            for i in range(0, len(tokenized_paragraph), self.doc_stride):
                # Slice question/paragraph and add special tokens (101: CLS, 102: SEP)
                input_ids_question = [101] + tokenized_question.ids[:self.max_question_len] + [102]
                input_ids_paragraph = tokenized_paragraph.ids[i : i + self.max_paragraph_len] + [102]
                # Pad sequence and obtain inputs to model
                input_ids, token_type_ids, attention_mask = self.padding(input_ids_question, input_ids_paragraph)
                input_ids_list.append(input_ids)
                token_type_ids_list.append(token_type_ids)
                attention_mask_list.append(attention_mask)
            return torch.tensor(input_ids_list), torch.tensor(token_type_ids_list), torch.tensor(attention_mask_list)
    def padding(self, input_ids_question, input_ids_paragraph):
        # Pad zeros if sequence length is shorter than max_seq_len
        padding_len = self.max_seq_len - len(input_ids_question) - len(input_ids_paragraph)
        # Indices of input sequence tokens in the vocabulary
        input_ids = input_ids_question + input_ids_paragraph + [0] * padding_len
        # Segment token indices to indicate first and second portions of the inputs. Indices are selected in [0, 1]
        token_type_ids = [0] * len(input_ids_question) + [1] * len(input_ids_paragraph) + [0] * padding_len
        # Mask to avoid performing attention on padding token indices. Mask values selected in [0, 1]
        attention_mask = [1] * (len(input_ids_question) + len(input_ids_paragraph)) + [0] * padding_len
        return input_ids, token_type_ids, attention_mask
# Build one dataset per split; QA_Dataset handles windowing and padding.
train_set = QA_Dataset("train", train_questions, train_questions_tokenized, train_paragraphs_tokenized)
dev_set = QA_Dataset("dev", dev_questions, dev_questions_tokenized, dev_paragraphs_tokenized)
test_set = QA_Dataset("test", test_questions, test_questions_tokenized, test_paragraphs_tokenized)
train_batch_size = 16
# Note: Do NOT change batch size of dev_loader / test_loader !
# Although batch size=1, it is actually a batch consisting of several windows from the same QA pair
train_loader = DataLoader(train_set, batch_size=train_batch_size, shuffle=True, pin_memory=True)
dev_loader = DataLoader(dev_set, batch_size=1, shuffle=False, pin_memory=True)
test_loader = DataLoader(test_set, batch_size=1, shuffle=False, pin_memory=True)
"""## Function for Evaluation"""
def evaluate(data, output):
    """Pick the best answer span across all windows of one QA pair.

    data   : the (input_ids, token_type_ids, attention_mask) batch for one QA
             pair, each shaped (1, num_windows, seq_len).
    output : model output carrying per-window start_logits / end_logits.
    Returns the decoded answer string with intra-token spaces removed.
    """
    answer = ''
    max_prob = float('-inf')
    num_of_windows = data[0].shape[1]
    for k in range(num_of_windows):
        # Obtain answer by choosing the most probable start position / end position
        start_prob, start_index = torch.max(output.start_logits[k], dim=0)
        end_prob, end_index = torch.max(output.end_logits[k], dim=0)
        # BUG FIX (the TODO'd postprocessing bug): when the best end token
        # precedes the best start token the span is invalid and used to decode
        # to an empty/garbled answer; skip it so a valid window can win.
        if start_index > end_index:
            continue
        # Probability of answer is calculated as sum of start_prob and end_prob
        prob = start_prob + end_prob
        # Replace answer if calculated probability is larger than previous windows
        if prob > max_prob:
            max_prob = prob
            # Convert tokens to chars (e.g. [1920, 7032] --> "大 金")
            answer = tokenizer.decode(data[0][0][k][start_index : end_index + 1])
    # Remove spaces in answer (e.g. "大 金" --> "大金")
    return answer.replace(' ','')
"""## Training"""
num_epoch = 1
validation = True
logging_step = 100
learning_rate = 1e-4
optimizer = AdamW(model.parameters(), lr=learning_rate)
if fp16_training:
model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader)
model.train()
print("Start Training ...")
for epoch in range(num_epoch):
step = 1
train_loss = train_acc = 0
for data in tqdm(train_loader):
# Load all data into GPU
data = [i.to(device) for i in data]
# Model inputs: input_ids, token_type_ids, attention_mask, start_positions, end_positions (Note: only "input_ids" is mandatory)
# Model outputs: start_logits, end_logits, loss (return when start_positions/end_positions are provided)
output = model(input_ids=data[0], token_type_ids=data[1], attention_mask=data[2], start_positions=data[3], end_positions=data[4])
# Choose the most probable start position / end position
start_index = torch.argmax(output.start_logits, dim=1)
end_index = torch.argmax(output.end_logits, dim=1)
# Prediction is correct only if both start_index and end_index are correct
train_acc += ((start_index == data[3]) & (end_index == data[4])).float().mean()
train_loss += output.loss
if fp16_training:
accelerator.backward(output.loss)
else:
output.loss.backward()
optimizer.step()
optimizer.zero_grad()
step += 1
##### TODO: Apply linear learning rate decay #####
optimizer.param_groups[0]["lr"] -= learning_rate/1684
# Print training loss and accuracy over past logging step
if step % logging_step == 0:
print(f"Epoch {epoch + 1} | Step {step} | loss = {train_loss.item() / logging_step:.3f}, acc = {train_acc / logging_step:.3f}")
train_loss = train_acc = 0
if validation:
print("Evaluating Dev Set ...")
model.eval()
with torch.no_grad():
dev_acc = 0
for i, data in enumerate(tqdm(dev_loader)):
output = model(input_ids=data[0].squeeze(dim=0).to(device), token_type_ids=data[1].squeeze(dim=0).to(device),
attention_mask=data[2].squeeze(dim=0).to(device))
# prediction is correct only if answer text exactly matches
dev_acc += evaluate(data, output) == dev_questions[i]["answer_text"]
print(f"Validation | Epoch {epoch + 1} | acc = {dev_acc / len(dev_loader):.3f}")
model.train()
# Save a model and its configuration file to the directory 「saved_model」
# i.e. there are two files under the direcory 「saved_model」: 「pytorch_model.bin」 and 「config.json」
# Saved model can be re-loaded using 「model = BertForQuestionAnswering.from_pretrained("saved_model")」
print("Saving Model ...")
model_save_dir = "saved_model"
model.save_pretrained(model_save_dir)
"""## Testing"""
print("Evaluating Test Set ...")
result = []
model.eval()
with torch.no_grad():
for data in tqdm(test_loader):
output = model(input_ids=data[0].squeeze(dim=0).to(device), token_type_ids=data[1].squeeze(dim=0).to(device),
attention_mask=data[2].squeeze(dim=0).to(device))
result.append(evaluate(data, output))
result_file = "result.csv"
with open(result_file, 'w') as f:
f.write("ID,Answer\n")
for i, test_question in enumerate(test_questions):
# Replace commas in answers with empty strings (since csv is separated by comma)
# Answers in kaggle are processed in the same way
f.write(f"{test_question['id']},{result[i].replace(',','')}\n")
print(f"Completed! Result is in {result_file}")
| 41.16622
| 239
| 0.699056
|
4a062a2c96a41011232d32d9cd7f243f7cd3bcb6
| 16,733
|
py
|
Python
|
sunpy/net/fido_factory.py
|
aringlis/sunpy
|
5bcc7ea2c4319fc777b1ba03075c0adf75d69cf5
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/net/fido_factory.py
|
aringlis/sunpy
|
5bcc7ea2c4319fc777b1ba03075c0adf75d69cf5
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/net/fido_factory.py
|
aringlis/sunpy
|
5bcc7ea2c4319fc777b1ba03075c0adf75d69cf5
|
[
"BSD-2-Clause"
] | null | null | null |
"""
This module provides the `Fido
<sunpy.net.fido_factory.UnifiedDownloaderFactory>` instance of
`sunpy.net.fido_factory.UnifiedDownloaderFactory` it also provides the
`~sunpy.net.fido_factory.UnifiedResponse` class which
`Fido.search <sunpy.net.fido_factory.UnifiedDownloaderFactory.search>` returns and the
`~sunpy.net.fido_factory.DownloadResponse` class that is returned by
`Fido.fetch <sunpy.net.fido_factory.UnifiedDownloaderFactory.fetch>`.
"""
# `Sequence` lives in collections.abc; importing it from `collections` has
# been deprecated since Python 3.3 and was removed in Python 3.10.
from collections.abc import Sequence
from parfive import Downloader, Results
from sunpy.util.datatype_factory_base import BasicRegistrationFactory
from sunpy.util.datatype_factory_base import NoMatchError
from sunpy.util.datatype_factory_base import MultipleMatchError
from sunpy.net.base_client import BaseClient
from sunpy.net.dataretriever.client import QueryResponse
from sunpy.net.vso import VSOClient, QueryResponse as vsoQueryResponse
from sunpy.net import attr
from sunpy.net import attrs as a
__all__ = ['Fido', 'UnifiedResponse', 'UnifiedDownloaderFactory']
class UnifiedResponse(Sequence):
    """
    The object used to store results from `~sunpy.net.UnifiedDownloaderFactory.search`.
    The `~sunpy.net.Fido` object returns results from multiple different
    clients. So it is always possible to sub-select these results, you can
    index this object with two indices. The first index is the client index,
    i.e. corresponding to the results from the `~sunpy.net.vso.VSOClient`. The
    second index can be used to select records from the results returned from
    that client, for instance if you only want every second result you could
    index the second dimension with ``::2``.
    """
    def __init__(self, lst):
        """
        Parameters
        ----------
        lst : `object`
            A single instance or an iterable of ``(QueryResponse, client)``
            pairs or ``QueryResponse`` objects with a ``.client`` attribute.
        """
        tmplst = []
        # numfile is the number of files not the number of results.
        self._numfile = 0
        if isinstance(lst, (QueryResponse, vsoQueryResponse)):
            if not hasattr(lst, 'client'):
                raise ValueError(
                    ("A {} object is only a valid input to UnifiedResponse "
                     "if it has a client attribute.").
                    format(type(lst).__name__))
            tmplst.append(lst)
            self._numfile = len(lst)
        else:
            for block in lst:
                if isinstance(block, tuple) and len(block) == 2:
                    # Attach the client to the response so it travels with it.
                    block[0].client = block[1]
                    tmplst.append(block[0])
                    self._numfile += len(block[0])
                elif hasattr(block, 'client'):
                    tmplst.append(block)
                    self._numfile += len(block)
                else:
                    raise ValueError(
                        "{} is not a valid input to UnifiedResponse.".format(type(lst)))
        self._list = tmplst
    def __len__(self):
        return len(self._list)
    def __iter__(self):
        return self.responses
    def _handle_record_slice(self, client_resp, record_slice):
        """
        Given a slice to be applied to the results from a single client, return
        an object of the same type as client_resp.
        """
        # When we subindex, we want to persist the type of the response object.
        resp_type = type(client_resp)
        # Make sure we always have an iterable, as most of the response objects
        # expect one.
        if isinstance(record_slice, int):
            resp = [client_resp[record_slice]]
        else:
            resp = client_resp[record_slice]
        # Reconstruct a response object with the sub-indexed records.
        ret = resp_type(resp)
        # Make sure we pass the client back out again.
        ret.client = client_resp.client
        return ret
    def __getitem__(self, aslice):
        """
        Support slicing the UnifiedResponse as a 2D object.
        The first index is to the client and the second index is the records
        returned from those clients.
        """
        # Just a single int as a slice, we are just indexing client.
        if isinstance(aslice, (int, slice)):
            ret = self._list[aslice]
        # Make sure we only have a length two slice.
        elif isinstance(aslice, tuple):
            if len(aslice) > 2:
                raise IndexError("UnifiedResponse objects can only "
                                 "be sliced with one or two indices.")
            # Indexing both client and records, but only for one client.
            if isinstance(aslice[0], int):
                client_resp = self._list[aslice[0]]
                ret = self._handle_record_slice(client_resp, aslice[1])
            # Indexing both client and records for multiple clients.
            else:
                intermediate = self._list[aslice[0]]
                ret = []
                for client_resp in intermediate:
                    resp = self._handle_record_slice(client_resp, aslice[1])
                    ret.append(resp)
        else:
            raise IndexError("UnifiedResponse objects must be sliced with integers.")
        return UnifiedResponse(ret)
    def get_response(self, i):
        """
        Get the actual response rather than another UnifiedResponse object.
        """
        return self._list[i]
    def response_block_properties(self):
        """
        Returns a set of class attributes on all the response blocks.
        Returns
        -------
        s : set
            Set of strings, containing attribute names common to all the
            response blocks.
        """
        s = self.get_response(0).response_block_properties()
        for i in range(1, len(self)):
            # BUG FIX: `set.intersection` returns a new set and does not
            # mutate `s`; the original discarded the result, so properties
            # missing from later responses were never removed.
            s = s.intersection(self.get_response(i).response_block_properties())
        return s
    @property
    def responses(self):
        """
        A generator of all the `sunpy.net.dataretriever.client.QueryResponse`
        objects contained in the `~sunpy.net.fido_factory.UnifiedResponse`
        object.
        """
        for i in range(len(self)):
            yield self.get_response(i)
    @property
    def file_num(self):
        # Total number of files across all responses (accumulated in __init__).
        return self._numfile
    def _repr_html_(self):
        nprov = len(self)
        if nprov == 1:
            ret = 'Results from {} Provider:</br></br>'.format(len(self))
        else:
            ret = 'Results from {} Providers:</br></br>'.format(len(self))
        for block in self.responses:
            ret += "{} Results from the {}:</br>".format(len(block),
                                                         block.client.__class__.__name__)
            ret += block._repr_html_()
            ret += '</br>'
        return ret
    def __repr__(self):
        ret = super(UnifiedResponse, self).__repr__()
        ret += '\n' + str(self)
        return ret
    def __str__(self):
        nprov = len(self)
        if nprov == 1:
            ret = 'Results from {} Provider:\n\n'.format(len(self))
        else:
            ret = 'Results from {} Providers:\n\n'.format(len(self))
        for block in self.responses:
            ret += "{} Results from the {}:\n".format(len(block), block.client.__class__.__name__)
            lines = repr(block).split('\n')
            ret += '\n'.join(lines[1:])
            ret += '\n\n'
        return ret
"""
Construct a simple AttrWalker to split up searches into blocks of attrs being
'anded' with AttrAnd.
This pipeline only understands AttrAnd and AttrOr, Fido.search passes in an
AttrAnd object of all the query parameters, if an AttrOr is encountered the
query is split into the component parts of the OR, which at somepoint will end
up being an AttrAnd object, at which point it is passed into
_get_registered_widget.
"""
query_walker = attr.AttrWalker()
@query_walker.add_creator(attr.AttrAnd)
def _create_and(walker, query, factory):
    """Dispatch a fully ANDed attr block to its matching client.

    Every AND block must carry a `~sunpy.net.attrs.Time`; without one there is
    no way to bound the query, so we refuse it.
    """
    has_time = any(isinstance(attrib, a.Time) for attrib in query.attrs)
    if not has_time:
        message = "The following part of the query did not have a time specified:\n"
        message += ''.join(str(attrib) + ', ' for attrib in query.attrs)
        raise ValueError(message)
    # One (QueryResponse, client) pair, boxed in a list for the walker.
    return [factory._make_query_to_client(*query.attrs)]
@query_walker.add_creator(attr.AttrOr)
def _create_or(walker, query, factory):
    """Split an OR query and recurse into each alternative, flattening the
    per-branch result lists into one list."""
    branch_results = [walker.create(attr.and_(branch), factory)
                      for branch in query.attrs]
    return [resp for branch in branch_results for resp in branch]
class UnifiedDownloaderFactory(BasicRegistrationFactory):
    """
    sunpy.net.Fido(\\*args, \\*\\*kwargs)
    Search and Download data from a variety of supported sources.
    """
    def search(self, *query):
        """
        Query for data in form of multiple parameters.
        Examples
        --------
        Query for LYRALightCurve data for the time range ('2012/3/4','2012/3/6')
        >>> from sunpy.net import Fido, attrs as a
        >>> import astropy.units as u
        >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'), a.Instrument('lyra'))  # doctest: +REMOTE_DATA
        Query for data from Nobeyama Radioheliograph and RHESSI
        >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),
        ...     (a.Instrument('norh') & a.Wavelength(17*u.GHz)) | a.Instrument('rhessi'))  # doctest: +REMOTE_DATA
        Query for 304 Angstrom SDO AIA data with a cadence of 10 minutes
        >>> import astropy.units as u
        >>> from sunpy.net import Fido, attrs as a
        >>> unifresp = Fido.search(a.Time('2012/3/4', '2012/3/6'),
        ...                        a.Instrument('AIA'),
        ...                        a.Wavelength(304*u.angstrom, 304*u.angstrom),
        ...                        a.vso.Sample(10*u.minute))  # doctest: +REMOTE_DATA
        Parameters
        ----------
        query : `sunpy.net.vso.attrs`, `sunpy.net.jsoc.attrs`
            A query consisting of multiple parameters which define the
            requested data. The query is specified using attributes from the
            VSO and the JSOC. The query can mix attributes from the VSO and
            the JSOC.
        Returns
        -------
        `sunpy.net.fido_factory.UnifiedResponse`
            Container of responses returned by clients servicing query.
        Notes
        -----
        The conjunction 'and' transforms query into disjunctive normal form
        ie. query is now of form A & B or ((A & B) | (C & D))
        This helps in modularising query into parts and handling each of the
        parts individually.
        """  # noqa
        # Normalise to an AttrAnd, then let the module-level query_walker
        # split any ORs and route each AND block to a client.
        query = attr.and_(*query)
        return UnifiedResponse(query_walker.create(query, self))
    def fetch(self, *query_results, path=None, max_conn=5, progress=True,
              overwrite=False, downloader=None, **kwargs):
        """
        Download the records represented by
        `~sunpy.net.fido_factory.UnifiedResponse` objects.
        Parameters
        ----------
        query_results : `sunpy.net.fido_factory.UnifiedResponse`
            Container returned by query method, or multiple.
        path : `str`
            The directory to retrieve the files into. Can refer to any fields
            in `UnifiedResponse.response_block_properties` via string formatting,
            moreover the file-name of the file downloaded can be referred to as file,
            e.g. "{source}/{instrument}/{time.start}/{file}".
        max_conn : `int`, optional
            The number of parallel download slots.
        progress : `bool`, optional
            If `True` show a progress bar showing how many of the total files
            have been downloaded. If `False`, no progress bars will be shown at all.
        overwrite : `bool` or `str`, optional
            Determine how to handle downloading if a file already exists with the
            same name. If `False` the file download will be skipped and the path
            returned to the existing file, if `True` the file will be downloaded
            and the existing file will be overwritten, if `'unique'` the filename
            will be modified to be unique.
        downloader : `parfive.Downloader`, optional
            The download manager to use. If specified the ``max_conn``,
            ``progress`` and ``overwrite`` arguments are ignored.
        Returns
        -------
        `parfive.Results`
        Examples
        --------
        >>> from sunpy.net.vso.attrs import Time, Instrument
        >>> unifresp = Fido.search(Time('2012/3/4','2012/3/5'), Instrument('EIT'))  # doctest: +REMOTE_DATA
        >>> filepaths = Fido.fetch(unifresp)  # doctest: +SKIP
        If any downloads fail, they can be retried by passing the `parfive.Results` object back into ``fetch``.
        >>> filepaths = Fido.fetch(filepaths)  # doctest: +SKIP
        """
        # `wait` is reserved for internal use: clients are always called with
        # wait=False so one shared downloader can batch every file.
        if "wait" in kwargs:
            raise ValueError("wait is not a valid keyword argument to Fido.fetch.")
        if downloader is None:
            downloader = Downloader(max_conn=max_conn, progress=progress, overwrite=overwrite)
        elif not isinstance(downloader, Downloader):
            raise TypeError("The downloader argument must be a parfive.Downloader object.")
        # Handle retrying failed downloads
        retries = [isinstance(arg, Results) for arg in query_results]
        if all(retries):
            # NOTE(review): merging via the private `_errors` attribute
            # couples this to parfive internals -- confirm against the pinned
            # parfive version.
            results = Results()
            for retry in query_results:
                dr = downloader.retry(retry)
                results.data += dr.data
                results._errors += dr._errors
            return results
        elif any(retries):
            raise TypeError("If any arguments to fetch are "
                            "`parfive.Results` objects, all arguments must be.")
        # Queue every record from every client onto the shared downloader.
        reslist = []
        for query_result in query_results:
            for block in query_result.responses:
                reslist.append(block.client.fetch(block, path=path,
                                                  downloader=downloader,
                                                  wait=False, **kwargs))
        results = downloader.download()
        # Combine the results objects from all the clients into one Results
        # object.
        for result in reslist:
            if result is None:
                continue
            if not isinstance(result, Results):
                raise TypeError(
                    "If wait is False a client must return a parfive.Downloader and either None"
                    " or a parfive.Results object.")
            results.data += result.data
            results._errors += result.errors
        return results
    def __call__(self, *args, **kwargs):
        # The factory instance is not callable; use .search() / .fetch().
        raise TypeError("'{}' object is not callable".format(self.__class__.__name__))
    def _check_registered_widgets(self, *args):
        """Factory helper function"""
        # Ask every registered client whether it can handle this query.
        candidate_widget_types = list()
        for key in self.registry:
            if self.registry[key](*args):
                candidate_widget_types.append(key)
        n_matches = len(candidate_widget_types)
        if n_matches == 0:
            # There is no default client
            raise NoMatchError("This query was not understood by any clients. Did you miss an OR?")
        elif n_matches == 2:
            # If two clients have reported they understand this query, and one
            # of them is the VSOClient, then we ignore VSOClient.
            if VSOClient in candidate_widget_types:
                candidate_widget_types.remove(VSOClient)
        # Finally check that we only have one match.
        if len(candidate_widget_types) > 1:
            candidate_names = [cls.__name__ for cls in candidate_widget_types]
            raise MultipleMatchError("The following clients matched this query. "
                                     "Please make your query more specific.\n"
                                     "{}".format(candidate_names))
        return candidate_widget_types
    def _make_query_to_client(self, *query):
        """
        Given a query, look up the client and perform the query.
        Parameters
        ----------
        query : collection of `~sunpy.net.vso.attr` objects
        Returns
        -------
        response : `~sunpy.net.dataretriever.client.QueryResponse`
        client : `object`
            Instance of client class
        """
        candidate_widget_types = self._check_registered_widgets(*query)
        # Instantiate the single matching client and run the search.
        tmpclient = candidate_widget_types[0]()
        return tmpclient.search(*query), tmpclient
# The singleton factory instance: clients self-register via BaseClient._registry
# and are consulted through their '_can_handle_query' validation functions.
Fido = UnifiedDownloaderFactory(
    registry=BaseClient._registry, additional_validation_functions=['_can_handle_query'])
| 37.602247
| 114
| 0.605749
|
4a062a45ba059715a00a6baf4a44bfb36cba4caf
| 631
|
py
|
Python
|
manage.py
|
Poornartha/ProductSocial
|
b2bc1fcf81090cb1edfdce4e76d7345e37d968e4
|
[
"MIT"
] | 8
|
2020-07-05T17:06:40.000Z
|
2022-02-05T19:44:53.000Z
|
manage.py
|
Poornartha/ProductSocial
|
b2bc1fcf81090cb1edfdce4e76d7345e37d968e4
|
[
"MIT"
] | 13
|
2020-11-13T18:56:58.000Z
|
2022-03-12T00:38:59.000Z
|
manage.py
|
Poornartha/ProductSocial
|
b2bc1fcf81090cb1edfdce4e76d7345e37d968e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ImageSocial.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.681818
| 75
| 0.684628
|
4a062b5b9f6544478a16c64b7bf3bc7b2fc9ae77
| 95,983
|
py
|
Python
|
tests/pmap_test.py
|
tlu7/jax
|
f4351e84191cf769b59f6e652264bb2b9ab007d8
|
[
"Apache-2.0"
] | null | null | null |
tests/pmap_test.py
|
tlu7/jax
|
f4351e84191cf769b59f6e652264bb2b9ab007d8
|
[
"Apache-2.0"
] | 6
|
2022-01-03T13:15:47.000Z
|
2022-02-14T13:14:10.000Z
|
tests/pmap_test.py
|
tlu7/jax
|
f4351e84191cf769b59f6e652264bb2b9ab007d8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import itertools as it
import gc
import os
from random import shuffle
from typing import Optional, cast
import unittest
from unittest import SkipTest
import warnings
import weakref
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax import tree_util
from jax import lax
from jax._src.lax import parallel
from jax._src import api as src_api
from jax import random
from jax.core import ShapedArray
from jax import (pmap, soft_pmap, jit, vmap, jvp, grad, make_jaxpr,
linearize, device_put)
from jax._src import device_array
import jax._src.lib
from jax._src.lib import xla_bridge
from jax._src.util import prod, safe_map
from jax.interpreters import pxla
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
prev_xla_flags = None
compatible_shapes = [[(3,)], [(3, 4), (3, 1), (1, 4)], [(2, 3, 4), (2, 1, 4)]]
def all_bdims(*shapes, pmap):
  """Yield every combination of batch dims for *shapes* -- each entry is None
  or an axis in [0, rank] -- skipping the all-None combination."""
  per_shape = [it.chain([cast(Optional[int], None)], range(len(s) + 1))
               for s in shapes]
  for combo in it.product(*per_shape):
    if any(d is not None for d in combo):
      yield combo
def out_bdims(shape, pmap):
  """Yield the non-None leading-axis choices from all_bdims for one shape."""
  for dims in all_bdims(shape, pmap=pmap):
    if dims[0] is not None:
      yield dims[0]
def add_bdim(bdim_size, bdim, shape):
  """Return `shape` with `bdim_size` inserted at axis `bdim`; no-op if None."""
  if bdim is None:
    return tuple(shape)
  dims = list(shape)
  dims.insert(bdim, bdim_size)
  return tuple(dims)
def slicer(x, bdim):
  """Return f(i) -> the i-th slice of x along axis bdim, or x itself when
  bdim is None (unbatched argument)."""
  if bdim is None:
    def take(_):
      return x
  else:
    def take(i):
      return lax.index_in_dim(x, i, bdim, keepdims=False)
  return take
def args_slicer(args, bdims):
  """Return f(i) -> the i-th slice of every arg along its own batch dim."""
  per_arg = safe_map(slicer, args, bdims)
  def take(i):
    return [s(i) for s in per_arg]
  return take
# Run all tests with 8 CPU devices.
def setUpModule():
  """Force the CPU platform to expose 8 devices for the pmap tests."""
  global prev_xla_flags
  # Remember the original value so tearDownModule can restore it.
  prev_xla_flags = os.getenv("XLA_FLAGS")
  flags_str = prev_xla_flags or ""
  # Don't override user-specified device count, or other XLA flags.
  if "xla_force_host_platform_device_count" not in flags_str:
    os.environ["XLA_FLAGS"] = (flags_str +
                               " --xla_force_host_platform_device_count=8")
  # Clear any cached backends so new CPU backend will pick up the env var.
  xla_bridge.get_backend.cache_clear()
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
  """Restore the pre-setUpModule XLA_FLAGS value and reset the backend cache."""
  if prev_xla_flags is None:
    # XLA_FLAGS was unset before setUpModule ran; remove the value we added.
    del os.environ["XLA_FLAGS"]
  else:
    os.environ["XLA_FLAGS"] = prev_xla_flags
  xla_bridge.get_backend.cache_clear()
ignore_jit_of_pmap_warning = partial(
jtu.ignore_warning, message=".*jit-of-pmap.*")
ignore_slow_all_to_all_warning = partial(
jtu.ignore_warning, message="all_to_all.*expect significant slowdowns.*")
ignore_xmap_warning = partial(
jtu.ignore_warning, message=".*is an experimental.*")
class PythonPmapTest(jtu.JaxTestCase):
  @property
  def pmap(self):
    # Use the Python (non-C++) pmap entry point so this test class exercises
    # the pure-Python dispatch path in jax._src.api.
    return src_api._python_pmap
  def testDeviceBufferToArray(self):
    """jnp.array over a device buffer: copy=False aliases, copy=True copies."""
    sda = self.pmap(lambda x: x)(jnp.ones((jax.device_count(), 2)))
    buf = sda.device_buffers[-1]
    view = jnp.array(buf, copy=False)
    self.assertArraysEqual(sda[-1], view)
    self.assertEqual(buf.device(), view.device())
    # Zero-copy: the view shares the underlying buffer address.
    self.assertEqual(buf.unsafe_buffer_pointer(), view.unsafe_buffer_pointer())
    copy = jnp.array(buf, copy=True)
    self.assertArraysEqual(sda[-1], copy)
    self.assertEqual(buf.device(), copy.device())
    # A real copy lives at a different address.
    self.assertNotEqual(buf.unsafe_buffer_pointer(), copy.unsafe_buffer_pointer())
  def _getMeshShape(self, device_mesh_shape):
    """Resolve a device-mesh shape that may contain -1 wildcards against the
    available device count, skipping the test when they are incompatible."""
    device_count = jax.device_count()
    if any(size == -1 for size in device_mesh_shape):
      try:
        # Let numpy's reshape infer the -1 dimension from the device count.
        return np.arange(device_count).reshape(device_mesh_shape).shape
      except ValueError as err:
        msg = "device mesh shape {} not compatible with device count {}"
        raise SkipTest(msg.format(device_mesh_shape, device_count)) from err
    else:
      if device_count % prod(device_mesh_shape):
        msg = "device mesh size {} does not divide available device count {}"
        raise SkipTest(msg.format(prod(device_mesh_shape), device_count))
      else:
        return device_mesh_shape
  def testBasic(self):
    """pmap of x - psum(x): every shard sees the cross-device sum."""
    f = self.pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')
    shape = (jax.device_count(), 4)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    expected = x - np.sum(x, 0)
    ans = f(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
def testMean(self):
  """Pmapped pmean subtraction matches the broadcast NumPy column mean."""
  pmapped = self.pmap(lambda v: v - lax.pmean(v, 'i'), axis_name='i')
  dims = (jax.device_count(), 4)
  inputs = np.arange(prod(dims), dtype=np.float32).reshape(dims)
  reference = inputs - np.broadcast_to(np.mean(inputs, 0), inputs.shape)
  self.assertAllClose(pmapped(inputs), reference, check_dtypes=False)
def testGather(self):
  """all_gather stacks every device's shard onto each device."""
  f = self.pmap(lambda x: lax.all_gather(x, 'i'), axis_name='i')

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  # Every device receives the full stacked input.
  expected = np.array([x] * jax.device_count())
  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherTiled(self):
  """all_gather with tiled=True concatenates shards along the gathered axis."""
  f = self.pmap(lambda x: lax.all_gather(x, 'i', tiled=True), axis_name='i')

  device_count = jax.device_count()
  shape = (device_count, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  # Tiled gather flattens the stacked result into one long axis per device.
  expected = np.array([x] * device_count).reshape(device_count, -1)
  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testReduceScatter(self):
  """psum_scatter sums across the axis and scatters one row to each device."""
  f = self.pmap(lambda x: lax.psum_scatter(x, 'i'), axis_name='i')

  device_count = jax.device_count()
  shape = (device_count, device_count)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  expected = np.sum(x, axis=0)
  ans = f(x)
  # Device i holds the i-th element of the reduced result.
  for i, actual in enumerate(ans):
    self.assertAllClose(actual, expected[i])
def testReduceScatterTiled(self):
  """Tiled psum_scatter gives each device a contiguous slice of the sum."""
  f = self.pmap(lambda x: lax.psum_scatter(x, 'i', tiled=True), axis_name='i')

  device_count = jax.device_count()
  shape = (device_count, 4 * device_count)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  expected = np.sum(x, axis=0)
  ans = f(x)
  # Each device owns an equal-length contiguous chunk of the reduction.
  scatter_len = len(expected) // device_count
  for i, actual in enumerate(ans):
    self.assertAllClose(actual,
                        expected[i * scatter_len:(i + 1) * scatter_len])
def testReduceScatterReplicaGroupsTiled(self):
  """Tiled psum_scatter restricted to even/odd axis_index_groups."""
  replicas = jax.device_count()
  if replicas % 2 != 0:
    raise SkipTest
  # Partition devices into evens and odds; each group reduces independently.
  axis_index_groups = [[i for i in range(jax.device_count()) if i % 2 == 0],
                       [i for i in range(jax.device_count()) if i % 2 != 0]]
  f = lambda x: lax.psum_scatter(
      x, 'i', axis_index_groups=axis_index_groups, tiled=True)
  f = self.pmap(f, axis_name='i')

  shape = (replicas, 4 * replicas)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  ans = f(x)

  group_1_result = np.sum(x[0::2,:], axis=0)
  group_2_result = np.sum(x[1::2,:], axis=0)
  # the result is scattered over (replicas // 2) devices
  scatter_len = len(group_1_result) * 2 // replicas

  for i, actual in enumerate(ans):
    expected = group_1_result if i % 2 == 0 else group_2_result
    self.assertAllClose(
        actual, expected[i // 2 * scatter_len:(i // 2 + 1) * scatter_len])
@ignore_slow_all_to_all_warning()
def testTrees(self):
  """Collectives applied to pytrees of arrays match their NumPy references."""
  ptranspose = lambda x, axis_name: lax.all_to_all(x, axis_name, 0, 0)
  def protate(x, axis_name):
    # Rotate shards one step around a ring via ppermute.
    n = lax.psum(1, axis_name)
    return lax.ppermute(x, axis_name, [(i, (i + 1) % n) for i in range(n)])

  # Helpers to lift a per-leaf op over a pytree, on the jax and numpy sides.
  tree_f = lambda f: partial(tree_util.tree_map, f)
  jax_f = lambda p: self.pmap(lambda x: p(x, 'i'), 'i')
  np_f = lambda p: tree_f(lambda x: np.broadcast_to(p(x, 0), x.shape))
  np_transpose = tree_f(np.transpose)
  np_rotate = tree_f(lambda x: np.concatenate([x[-1:], x[:-1]]))

  n = jax.device_count()
  x = {'a': np.arange(1 * n * n, 2 * n * n).reshape([n, n]),
       'b': np.arange(2 * n * n, 3 * n * n).reshape([n, n]),
       'c': np.arange(4 * n * n, 5 * n * n).reshape([n, n])}

  assert_allclose = partial(tree_util.tree_multimap,
                            partial(self.assertAllClose, check_dtypes=False))
  assert_allclose(jax_f(lax.pmax)(x), np_f(np.max)(x))
  assert_allclose(jax_f(lax.pmin)(x), np_f(np.min)(x))
  assert_allclose(jax_f(lax.psum)(x), np_f(np.sum)(x))
  assert_allclose(jax_f(lax.pmean)(x), np_f(np.mean)(x))
  assert_allclose(jax_f(ptranspose)(x), np_transpose(x))
  assert_allclose(jax_f(protate)(x), np_rotate(x))
def testCollectivesWithTreesOfDifferentDtypes(self):
  """Reductions work on pytrees mixing float32 and int32 leaves."""
  n = len(jax.devices())
  x = {'a': np.arange(1 * n * n, 2 * n * n, dtype=np.float32).reshape([n, n]),
       'b': np.arange(2 * n * n, 3 * n * n, dtype=np.int32).reshape([n, n]),
       'c': np.arange(4 * n * n, 5 * n * n, dtype=np.float32).reshape([n, n]),
       'd': np.arange(6 * n * n, 7 * n * n, dtype=np.int32).reshape([n, n])}
  tree_f = lambda f: partial(tree_util.tree_map, f)
  jax_f = lambda p: self.pmap(lambda x: p(x, 'i'), 'i')
  np_f = lambda p: tree_f(lambda x: np.broadcast_to(p(x, 0), x.shape))
  assert_allclose = partial(tree_util.tree_multimap,
                            partial(self.assertAllClose, check_dtypes=False))
  assert_allclose(jax_f(lax.pmax)(x), np_f(np.max)(x))
  assert_allclose(jax_f(lax.pmin)(x), np_f(np.min)(x))
  assert_allclose(jax_f(lax.psum)(x), np_f(np.sum)(x))
  assert_allclose(jax_f(lax.pmean)(x), np_f(np.mean)(x))
def testComplexPsum(self):
  """psum supports complex64 inputs (float32 pairs viewed as complex)."""
  f = self.pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')

  shape = (jax.device_count(), 4 * 2)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape).view(np.complex64)
  expected = x - np.sum(x, 0)
  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
    {"testcase_name": f"_split={split_axis}_concat={concat_axis}",
     "split_axis": split_axis, "concat_axis": concat_axis}
    for split_axis, concat_axis in it.product(range(2), range(2)))
@ignore_slow_all_to_all_warning()
def testAllToAll(self, split_axis, concat_axis):
  """all_to_all over one pmapped axis equals a moveaxis of the full input."""
  pmap_in_axis = 0
  shape = (jax.device_count(),) * 3
  x = np.arange(np.prod(shape)).reshape(shape)

  @partial(self.pmap, axis_name='i')
  def f(x):
    return lax.all_to_all(x, 'i', split_axis, concat_axis)

  y = f(x)
  # split_axis is given relative to the per-device view; shift it past the
  # pmapped leading axis to index into the unmapped input.
  if pmap_in_axis <= split_axis:
    split_axis += 1
  ref = jnp.moveaxis(x, (pmap_in_axis, split_axis),
                     (concat_axis + 1, 0))
  self.assertAllClose(y, ref)
@parameterized.named_parameters(
    {"testcase_name": f"_split={split_axis}_concat={concat_axis}",
     "split_axis": split_axis, "concat_axis": concat_axis}
    for split_axis, concat_axis in it.product(range(2), range(2)))
@ignore_slow_all_to_all_warning()
def testAllToAllSplitAxis(self, split_axis, concat_axis):
  """all_to_all over a tuple of two nested pmap axes ('i', 'j')."""
  if jax.device_count() < 4:
    raise SkipTest("test requires at least four devices")
  pmap_in_axis = 0
  shape = (4, 4, 4)
  x = np.arange(np.prod(shape)).reshape(shape)

  @partial(self.pmap, axis_name='i')
  @partial(self.pmap, axis_name='j')
  def f(x):
    return lax.all_to_all(x, ('i', 'j'), split_axis, concat_axis)

  # Unroll the leading size-4 axis into 2x2 to feed the nested pmaps, then
  # fold the result back to compare against the single-axis reference.
  unroll_shape = (2, 2, *shape[1:])
  x_unroll = x.reshape(unroll_shape)
  y_unroll = f(x_unroll)
  y = y_unroll.reshape(shape)

  if pmap_in_axis <= split_axis:
    split_axis += 1
  ref = jnp.moveaxis(x, (pmap_in_axis, split_axis),
                     (concat_axis + 1, 0))
  self.assertAllClose(y, ref)
def testNestedBasic(self):
  """Nested pmaps with psum over both named axes match repeated NumPy sums."""
  f = lambda x: lax.psum(lax.psum(x, 'i'), 'j')
  f = self.pmap(self.pmap(f, 'i'), 'j')

  def sum_and_broadcast(x, axis):
    # Reference: sum along `axis` and broadcast back to the original shape.
    return np.repeat(np.sum(x, axis, keepdims=True), x.shape[axis], axis)

  shape = (jax.device_count(), 1, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = f(x)
  expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testMismatchedAxisSizes(self):
  """pmap rejects arguments whose mapped axis sizes disagree."""
  n = jax.device_count()
  f = self.pmap(lambda x, y: x + y)
  self.assertRaisesRegex(
      ValueError,
      "pmap got inconsistent sizes for array axes to be mapped",
      lambda: f(np.random.randn(n), np.random.randn(n - 1)))
@parameterized.named_parameters(
    {"testcase_name": "_mesh={}".format(device_mesh_shape).replace(" ", ""),
     "device_mesh_shape": device_mesh_shape}
    for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedShardingAndStacking(self, device_mesh_shape):
  """Nested pmap of the identity round-trips data across a 2D device mesh."""
  mesh_shape = self._getMeshShape(device_mesh_shape)

  f = lambda x: x
  f = self.pmap(self.pmap(f, 'i'), 'j')

  shape = mesh_shape + (4,)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = f(x)
  expected = x
  self.assertEqual(ans.shape, expected.shape)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testPartiallyMapped(self):
  """in_axes=None broadcasts an unmapped argument; output sharding is checked."""
  f = self.pmap(lambda x, y: x, in_axes=(None, 0))
  g = self.pmap(lambda x, y: x - lax.psum(y, 'i'), axis_name='i', in_axes=(None, 0))

  mesh_shape = (jax.device_count(),)
  shape = mesh_shape + (4,)
  x = np.array(3., dtype=np.float32)
  y = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  f_expected = np.broadcast_to(x, mesh_shape)
  f_ans = f(x, y)
  self.assertAllClose(f_ans, f_expected)
  self.assertIsInstance(f_ans, pxla.ShardedDeviceArray)
  # the output is actually replicated (has the same values in each device buffer)
  # but out_axes is implicitly 0, so we shouldn't have replication in the
  # sharding spec.
  self.assertEmpty([a for a in f_ans.sharding_spec.mesh_mapping
                    if isinstance(a, pxla.Replicated)])

  g_expected = np.broadcast_to(x - np.sum(y, 0, keepdims=True), shape)
  g_ans = g(x, y)
  self.assertAllClose(g_ans, g_expected)
  self.assertIsInstance(g_ans, pxla.ShardedDeviceArray)
  self.assertEmpty([a for a in g_ans.sharding_spec.mesh_mapping
                    if isinstance(a, pxla.Replicated)])
def testReplicate(self):
  """pxla.replicate copies a value to all devices with a Replicated spec."""
  base = np.array([3.,4.], dtype=np.float32)
  num_devices = jax.device_count()
  replicated = pxla.replicate(base, num_devices, num_devices, in_axis=None)
  self.assertAllClose(base, replicated)
  # Every mesh-mapping entry should be a Replicated marker.
  self.assertEmpty([a for a in replicated.sharding_spec.mesh_mapping
                    if not isinstance(a, pxla.Replicated)])
@parameterized.named_parameters(
    {"testcase_name": "_mesh={}".format(device_mesh_shape).replace(" ", ""),
     "device_mesh_shape": device_mesh_shape}
    for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testPartiallyMappedNested(self, device_mesh_shape):
  """Unmapped (in_axes=None) arguments broadcast through nested pmaps."""
  mesh_shape = self._getMeshShape(device_mesh_shape)

  f = self.pmap(lambda x, y: x - lax.psum(y, 'i'), axis_name='i', in_axes=(None, 0))
  f = self.pmap(f, axis_name='j', in_axes=(None, 0))

  x = 3.
  y = np.arange(prod(mesh_shape), dtype=np.float32).reshape(mesh_shape)
  expected = np.broadcast_to(x - np.sum(y, 1, keepdims=True), mesh_shape)

  ans = f(x, y)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testJvpAndPartialEval(self):
  """linearize of a pmapped sin gives the cosine tangent; jaxpr tracing works."""
  @partial(self.pmap, axis_name='i')
  def f(x):
    return jnp.sin(x)

  def splitjvp(x):
    # Linearize first, then apply the tangent map to an all-ones tangent.
    _, jvp = linearize(f, x)
    return jvp(jnp.ones_like(x))

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  expected = np.cos(x)

  ans = splitjvp(x)
  self.assertAllClose(ans, expected, check_dtypes=False)

  make_jaxpr(splitjvp)(x)  # doesn't crash
def testGradBasic(self):
  """Gradient through a pmapped sin matches the unmapped gradient."""
  @partial(self.pmap, axis_name='i')
  def f(x):
    return jnp.sin(x)

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = grad(lambda x: jnp.sum(jnp.sin(x)))(x)
  expected = grad(lambda x: jnp.sum(f(x)))(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testGradOfPsum(self):
  """Forward- and reverse-mode derivatives of psum pass numerical checks."""
  @partial(self.pmap, axis_name='i')
  def f(x):
    return lax.psum(x, axis_name='i')

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  jtu.check_grads(f, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, eps=1.)
def testGradOfJvp(self):
  """grad composed with linearize of a pmapped sin matches plain grad-of-jvp."""
  @partial(self.pmap, axis_name='i')
  def f(x):
    return jnp.sin(x)

  def splitjvp(x):
    _, jvp = linearize(f, x)
    return jvp(jnp.ones_like(x))

  # Reference: differentiate the jvp of unmapped sin directly.
  fun = lambda x: jnp.sum(jvp(jnp.sin, (x,), (jnp.ones_like(x),))[1])

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = grad(lambda x: jnp.sum(splitjvp(x)))(x)
  expected = grad(fun)(x)
  self.assertAllClose(ans, expected)
def testTwoArgsGrad(self):
  """Gradient of a two-argument pmapped psum matches an unmapped reference.

  `f` is the pmapped computation under test; `g` is an unmapped reference
  reproducing the same broadcast-sum semantics.
  """
  def f(x, y):
    return lax.psum(5. * jnp.cos(x) * jnp.sin(y), 'i')
  f = self.pmap(f, 'i')

  def g(x, y):
    tot = jnp.sum(5. * jnp.cos(x) * jnp.sin(y))
    return tot * jnp.ones_like(x)  # broadcast to map like pjit does

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  y = 4 + x
  # Bug fix: previously both `ans` and `expected` differentiated `g`, so the
  # pmapped `f` was never exercised. Compare grad(f) against grad(g).
  ans = grad(lambda x, y: jnp.sum(f(x, y)))(x, y)
  expected = grad(lambda x, y: jnp.sum(g(x, y)))(x, y)
  self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
    {"testcase_name": "_mesh={}".format(device_mesh_shape).replace(" ", ""),
     "device_mesh_shape": device_mesh_shape}
    for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedWithClosure(self, device_mesh_shape):
  """Grad through nested pmaps with closed-over tracers matches nested vmaps."""
  mesh_shape = self._getMeshShape(device_mesh_shape)

  @partial(self.pmap, axis_name='i')
  def test_fun(x):
    # `x` and `y` are closed over by the inner pmapped function `g`.
    y = jnp.sum(jnp.sin(x))

    @partial(self.pmap, axis_name='j')
    def g(z):
      return 3. * jnp.exp(jnp.sin(x).sum() * jnp.cos(y) * jnp.tan(z))

    return grad(lambda w: jnp.sum(g(w)))(x)

  @vmap
  def baseline_fun(x):
    # Same computation with vmap instead of pmap, as the reference.
    y = jnp.sum(jnp.sin(x))

    @vmap
    def g(z):
      return 3. * jnp.exp(jnp.sin(x).sum() * jnp.cos(y) * jnp.tan(z))

    return grad(lambda w: jnp.sum(g(w)))(x)

  shape = mesh_shape + (4,)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = grad(lambda x: jnp.sum(test_fun(x)))(x)
  expected = grad(lambda x: jnp.sum(baseline_fun(x)))(x)
  self.assertAllClose(ans, expected, atol=1e-3)
def testShardedDeviceArrays(self):
  """ShardedDeviceArrays round-trip through pmap, jit, and device movement."""
  f = lambda x: 2 * x
  f = self.pmap(f, axis_name='i')
  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  # test that we can pass in and out ShardedDeviceArrays
  y = f(x)
  self.assertIsInstance(y, jnp.ndarray)
  self.assertIsInstance(y, pxla.ShardedDeviceArray)
  self.assertIsInstance(y, device_array.DeviceArray)
  self.assertNotIsInstance(y, np.ndarray)
  self.assertAllClose(y, 2 * x, check_dtypes=False)
  z = f(y)
  self.assertIsInstance(z, pxla.ShardedDeviceArray)
  self.assertIsInstance(z, device_array.DeviceArray)
  self.assertNotIsInstance(z, np.ndarray)
  self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)

  # test that we can pass in a regular DeviceArray
  y = f(device_put(x))
  self.assertIsInstance(y, pxla.ShardedDeviceArray)
  self.assertAllClose(y, 2 * x, check_dtypes=False)

  # test that we can pass a ShardedDeviceArray to a regular jit computation
  z = y + y
  self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)

  # test that we can handle device movement on dispatch
  # (buffers are reversed, so the result comes out row-reversed too)
  y = pxla.make_sharded_device_array(y.aval, y.sharding_spec,
                                     y.device_buffers[::-1])
  z = f(y)
  self.assertAllClose(z, 2 * 2 * x[::-1], check_dtypes=False)

  # test that the repr doesn't crash
  repr(z)
# Tests edge cases in lax._reshape_sharded_device_array
@parameterized.named_parameters(
    {"testcase_name": "_in={}_out={}".format(in_shape, out_shape)
     .replace(" ", ""),
     "in_shape": in_shape, "out_shape": out_shape}
    for in_shape, out_shape in [
      [(1,1), (1,)], [(1,), (1,1)], [(1,), ()], [(4,7), (2,2,7)]
    ])
def testShardedDeviceArrayReshape(self, in_shape, out_shape):
  """Reshaping a ShardedDeviceArray agrees with reshaping the NumPy input."""
  if jax.device_count() < max(in_shape[:1] + out_shape[:1]):
    raise SkipTest("not enough devices")

  x = np.arange(prod(in_shape)).reshape(in_shape)
  sharded_x = self.pmap(lambda x: x)(x)
  self.assertAllClose(sharded_x.reshape(out_shape), x.reshape(out_shape),
                      check_dtypes=False)
def testPsumMultiple(self):
  """psum over a tuple of axes ('i', 'j') reduces across both nested pmaps."""
  f = lambda x: lax.psum(x, ('i', 'j'))
  f = self.pmap(self.pmap(f, 'i'), 'j')

  def sum_and_broadcast(x, axis):
    return np.repeat(np.sum(x, axis, keepdims=True), x.shape[axis], axis)

  device_count = jax.device_count()
  # Use a 2-wide inner axis when the device count allows it.
  num_pairs, ragged = divmod(device_count, 2)
  if num_pairs > 1 and not ragged:
    shape = (num_pairs, 2, 4)
  else:
    shape = (device_count, 1, 4)

  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  ans = f(x)
  expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testPsumConstantReplicaGroups(self):
  """psum of a constant within axis_index_groups sums over the group size."""
  replicas = jax.device_count()
  if replicas % 2 != 0:
    raise SkipTest
  # Two groups: first half and second half of the devices.
  axis_index_groups = np.arange(replicas).reshape(
    2, replicas // 2).tolist()
  f = lambda x: x - lax.psum(2., 'i', axis_index_groups=axis_index_groups)
  f = self.pmap(f, 'i')
  shape = (replicas, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  expected_psum = 2. * replicas // 2
  expected = x - expected_psum

  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def testPsumUnevenReplicaGroups(self):
  """psum with axis_index_groups of unequal sizes ([0,1] vs. the rest)."""
  replicas = jax.device_count()
  if replicas <= 2:
    raise SkipTest("Test expected devices greater than 2.")
  axis_index_groups = [[0,1], np.arange(2,replicas)]
  f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
  f = self.pmap(f, 'i')
  shape = (replicas, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  def sum_helper(a):
    # Sum a group's rows and broadcast back over the group.
    return np.broadcast_to(a.sum(0, keepdims=True),
                            (len(a), x.shape[1]))
  expected_psum_1 = sum_helper(x[0:2])
  expected_psum_2 = sum_helper(x[2:])
  expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
  expected = x - expected_psum

  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testPsumReplicaGroups(self):
  """psum restricted to two equal halves of the device axis."""
  replicas = jax.device_count()
  if replicas % 2 != 0:
    raise SkipTest
  axis_index_groups = np.arange(replicas).reshape(
    2, replicas // 2).tolist()
  f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
  f = self.pmap(f, 'i')
  shape = (replicas, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  def sum_helper(a):
    # Sum a half's rows and broadcast back over that half.
    return np.broadcast_to(a.sum(0, keepdims=True),
                            (replicas // 2, x.shape[1]))
  expected_psum_1 = sum_helper(x[:replicas // 2])
  expected_psum_2 = sum_helper(x[replicas // 2:])
  expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
  expected = x - expected_psum

  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherReplicaGroups(self):
  """all_gather within even/odd device groups collects only group members."""
  replicas = jax.device_count()
  if replicas % 2 != 0:
    raise SkipTest("Test expected an even number of devices greater than 1.")
  # Transposing pairs the even-indexed devices into one group, odds into the other.
  axis_index_groups = np.arange(replicas, dtype=np.int32)
  axis_index_groups = axis_index_groups.reshape((replicas // 2, 2)).T
  axis_index_groups = axis_index_groups.tolist()

  f = lambda x: lax.all_gather(x, 'i', axis_index_groups=axis_index_groups)
  f = self.pmap(f, 'i')

  shape = (replicas, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  ans = f(x)

  group_1_result = x[0::2]
  group_2_result = x[1::2]
  expected = np.empty((replicas, replicas // 2, x.shape[1]))
  expected[0::2] = group_1_result
  expected[1::2] = group_2_result

  self.assertAllClose(ans, expected, check_dtypes=False)
def testGatherReplicaGroupsInterleaved(self):
  """all_gather with interleaved groups: evens gather evens, odds gather odds."""
  replicas = jax.device_count()
  if replicas % 2 != 0:
    raise SkipTest("Test expected an even number of devices greater than 1.")

  indexes = np.arange(replicas)
  indexes = np.concatenate([indexes[::2], indexes[1::2]])
  axis_index_groups = indexes.reshape(2, replicas // 2).tolist()

  f = lambda x: lax.all_gather(x, 'i', axis_index_groups=axis_index_groups)
  f = self.pmap(f, 'i')

  shape = (replicas, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  ans = f(x)

  expected = np.zeros((replicas, replicas // 2, x.shape[1]))
  expected[::2] = x[::2]
  expected[1::2] = x[1::2]

  self.assertAllClose(ans, expected, check_dtypes=False)
@ignore_slow_all_to_all_warning()
def testGradOfGather(self):
  """Forward- and reverse-mode derivatives of all_gather pass numeric checks."""
  @partial(self.pmap, axis_name='i')
  def f(x):
    return lax.all_gather(x, axis_name='i')

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  jtu.check_grads(f, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, eps=1.)
def testNestedPmapReplicaGroups(self):
  """axis_index_groups inside nested pmaps, in several nesting arrangements."""
  replicas = jax.device_count()
  if replicas % 4 != 0:
    raise SkipTest
  # Groups split the inner axis (size replicas // 2) into two halves.
  axis_index_groups = np.arange(replicas // 2).reshape(
    2, replicas // 4).tolist()
  f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
  f1 = self.pmap(self.pmap(f, 'i'), 'j')
  f2 = self.pmap(lambda x: self.pmap(f, 'i')(x) + 1., 'j')  # "imperfectly nested" case
  f3 = self.pmap(self.pmap(f, 'j'), 'i')

  shape = (2, replicas // 2, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  def sum_helper_f1(a):
    return np.broadcast_to(a.sum(1, keepdims=True),
                            (shape[0], shape[1] // 2, shape[2]))
  expected_psum_1 = sum_helper_f1(x[:, :replicas // 4])
  expected_psum_2 = sum_helper_f1(x[:, replicas // 4:])
  expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 1)
  expected = x - expected_psum

  ans = f1(x)
  self.assertAllClose(ans, expected)

  expected = x - expected_psum + 1.
  ans = f2(x)
  self.assertAllClose(ans, expected)

  # For f3 the grouped axis 'j' is the *outer* array axis, so reshape and
  # recompute the reference along axis 0.
  shape = (replicas // 2, 2, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  def sum_helper_f3(a):
    return np.broadcast_to(a.sum(0, keepdims=True),
                            (shape[0] // 2, shape[1], shape[2]))
  expected_psum_1 = sum_helper_f3(x[:replicas // 4])
  expected_psum_2 = sum_helper_f3(x[replicas // 4:])
  expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
  expected = x - expected_psum

  ans = f3(x)
  self.assertAllClose(ans, expected)
def testAxisGroups(self):
  """xla.axis_groups partitions replica ids correctly for a (4, 2) axis env."""
  axis_env = xla.AxisEnv(8, ('i', 'j'), (4, 2))
  groups = xla.axis_groups(axis_env, 'i')
  self.assertEqual(groups, ((0, 2, 4, 6), (1, 3, 5, 7)))

  groups = xla.axis_groups(axis_env, 'j')
  self.assertEqual(groups, ((0, 1), (2, 3), (4, 5), (6, 7)))

  groups = xla.axis_groups(axis_env, ('i', 'j'))
  self.assertEqual(groups, ((0, 1, 2, 3, 4, 5, 6, 7,),))

  groups = xla.axis_groups(axis_env, ('j', 'i'))
  self.assertEqual(len(groups), 1)
  self.assertEqual((tuple(sorted(groups[0])),),
                   ((0, 1, 2, 3, 4, 5, 6, 7,),))  # order doesn't matter
def testCollectivePermute(self):
  """A cyclic ppermute rotates the device axis by one position."""
  device_count = jax.device_count()
  rotation = [(i, (i + 1) % device_count) for i in range(device_count)]
  f = lambda x: lax.ppermute(x, perm=rotation, axis_name='i')
  f = self.pmap(f, 'i')

  x = jnp.arange(4 * device_count).reshape((device_count, 4))
  ans = f(x)
  expected = np.roll(x, shift=1, axis=0)
  self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("cpu")
def testCollectivePermuteGrad(self):
  """Gradient of a non-cyclic shift-right ppermute (last device gets zero)."""
  device_count = jax.device_count()
  shift_right = [(i, (i + 1)) for i in range(device_count - 1)]
  f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
  y = np.pi + np.arange(device_count, dtype=np.float32)
  g = lambda x: jnp.sum(y * self.pmap(f, 'i')(x))

  x = np.arange(device_count, dtype=np.float32)
  ans = grad(g)(x)
  # The permutation's transpose shifts the cotangents left; the last entry
  # has no incoming edge, so its gradient is zero.
  expected = np.concatenate([np.pi + np.arange(1, device_count), [0]])
  self.assertAllClose(ans, expected, check_dtypes=False)
def testCollectivePermuteCyclicGrad(self):
  """Gradient of a cyclic shift-right ppermute rotates cotangents left."""
  device_count = jax.device_count()
  shift_right = [(i, (i + 1) % device_count) for i in range(device_count)]
  f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
  y = np.pi + np.arange(device_count, dtype=np.float32)
  g = lambda x: jnp.sum(y * self.pmap(f, 'i')(x))

  x = np.arange(device_count, dtype=np.float32)

  ans = grad(g)(x)
  expected = np.roll(np.pi + np.arange(device_count), -1)
  self.assertAllClose(ans, expected, check_dtypes=False)

  jtu.check_grads(g, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2)
def testCollectivePermuteCyclicWithPShuffle(self):
  """pshuffle with a cyclic source permutation rolls values by one device."""
  device_count = jax.device_count()
  values = np.arange(device_count)
  # pshuffle's perm lists, for each device, which device to pull from.
  shift_right = [(i - 1) % device_count for i in range(device_count)]
  f = lambda x: lax.pshuffle(x, perm=shift_right, axis_name='i')
  expected = np.roll(values, 1)
  ans = np.asarray(self.pmap(f, "i")(values))
  self.assertAllClose(ans, expected, check_dtypes=False)
def testPShuffleWithBadPerm(self):
  """pshuffle rejects a perm with duplicate entries (not a permutation)."""
  device_count = jax.device_count()
  bad_perm = list(range(device_count))
  bad_perm[0] = 1  # duplicates entry 1, so this is no longer a permutation
  f = lambda x: lax.pshuffle(x, perm=bad_perm, axis_name='i')
  g = lambda: self.pmap(f, "i")(np.arange(device_count))
  self.assertRaisesRegex(
    ValueError,
    "`perm` does not represent a permutation: \\[1.*\\]", g)
def testPpermuteWithZipObject(self):
  """ppermute accepts a zip object (a generic iterable) as the permutation."""
  # https://github.com/google/jax/issues/1703
  num_devices = jax.device_count()
  perm = [num_devices - 1] + list(range(num_devices - 1))
  f = self.pmap(lambda x: lax.ppermute(x, "i", zip(perm, range(num_devices))), "i")
  result = f(jnp.arange(num_devices, dtype=jnp.float32))
  expected = jnp.asarray(perm, dtype=jnp.float32)
  self.assertAllClose(result, expected)
def testRule30(self):
  """Halo exchange via ppermute, exercised through a Rule 30 simulation."""
  # This is a test of collective_permute implementing a simple halo exchange
  # to run a rule 30 simulation: https://en.wikipedia.org/wiki/Rule_30
  # Halo exchange should be useful in spatially-sharded convolutions and in
  # other simulations.
  device_count = jax.device_count()

  def send_right(x, axis_name):
    left_perm = [(i, (i + 1) % device_count) for i in range(device_count)]
    return lax.ppermute(x, perm=left_perm, axis_name=axis_name)

  def send_left(x, axis_name):
    left_perm = [((i + 1) % device_count, i) for i in range(device_count)]
    return lax.ppermute(x, perm=left_perm, axis_name=axis_name)

  def update_board(board):
    # Rule 30: new cell = left XOR (center OR right).
    left = board[:-2]
    right = board[2:]
    center = board[1:-1]
    return lax.bitwise_xor(left, lax.bitwise_or(center, right))

  @partial(self.pmap, axis_name='i')
  def step(board_slice):
    # Exchange one-cell halos with the neighboring devices, then update.
    left, right = board_slice[:1], board_slice[-1:]
    right, left = send_left(left, 'i'), send_right(right, 'i')
    enlarged_board_slice = jnp.concatenate([left, board_slice, right])
    return update_board(enlarged_board_slice)

  board = np.zeros(40, dtype=bool)
  board[board.shape[0] // 2] = True
  reshaped_board = board.reshape((device_count, -1))

  boards = []
  def print_board(board):
    boards.append(''.join('*' if x else ' ' for x in board.ravel()))

  print_board(reshaped_board)
  for _ in range(20):
    reshaped_board = step(reshaped_board)
    print_board(reshaped_board)

  ans = '\n'.join(boards)
  expected = '\n'.join((
      '                    *                   ',
      '                   ***                  ',
      '                  **  *                 ',
      '                 ** ****                ',
      '                **  *   *               ',
      '               ** **** ***              ',
      '              **  *    *  *             ',
      '             ** ****  ******            ',
      '            **  *   ***     *           ',
      '           ** **** **  *   ***          ',
      '          **  *    * **** **  *         ',
      '         ** ****  ** *    * ****        ',
      '        **  *   ***  **  ** *   *       ',
      '       ** **** **  *** ***  ** ***      ',
      '      **  *    * ***   *  ***  *  *     ',
      '     ** ****  ** *  * *****  *******    ',
      '    **  *   ***  **** *    ***      *   ',
      '   ** **** **  ***    **  **  *    ***  ',
      '  **  *    * ***  *  ** *** ****  **  * ',
      ' ** ****  ** *  ******  *   *   *** ****',
      ' *  *   ***  ****     **** *** **   *   ',
  ))

  print(ans)
  self.assertEqual(ans, expected)
def testReduceMax(self):
  """pmax across the device axis matches the NumPy column max."""
  pmapped = self.pmap(lambda v: v - lax.pmax(v, 'i'), axis_name='i')
  dims = (jax.device_count(), 4)
  inputs = np.arange(prod(dims), dtype=np.float32).reshape(dims)
  reference = inputs - np.max(inputs, 0)
  self.assertAllClose(pmapped(inputs), reference, check_dtypes=False)
def testReduceMin(self):
  """pmin across the device axis matches the NumPy column min."""
  pmapped = self.pmap(lambda v: v - lax.pmin(v, 'i'), axis_name='i')
  dims = (jax.device_count(), 4)
  inputs = np.arange(prod(dims), dtype=np.float32).reshape(dims)
  reference = inputs - np.min(inputs, 0)
  self.assertAllClose(pmapped(inputs), reference, check_dtypes=False)
def testDeviceCountError(self):
  """pmap raises when the mapped axis exceeds the available device count."""
  device_count = jax.device_count()

  f = self.pmap(lambda x: x)
  x = jnp.arange(device_count + 1)
  self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))

  f = self.pmap(lambda x: x)
  x = np.ones((device_count + 1, 10))
  self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))

  # Nested pmaps multiply the replica requirement beyond the device count.
  f = self.pmap(lambda x: self.pmap(lambda x: x)(x))
  x = np.ones((device_count, 2, 10))
  self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))
def testPmapConstant(self):
  """A pmapped constant output is replicated across the mapped axis."""
  device_count = jax.device_count()
  f = self.pmap(lambda x: 3)
  x = jnp.arange(device_count)
  with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
    ans = f(x)
  # self.assertEqual(count[0], 0)  # TODO(mattjj): fix this
  expected = np.repeat(3, device_count)
  self.assertAllClose(ans, expected, check_dtypes=False)

  # Mixed output: one mapped value, one constant; exactly one compilation.
  f = self.pmap(lambda x: (x, 3))
  x = np.arange(device_count)
  with jtu.assert_num_jit_and_pmap_compilations(1):
    _, ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testPmapConstantDevices(self):
  """Constant outputs land on an explicit, shuffled subset of devices."""
  if jax.device_count() == 1:
    raise SkipTest("this test requires multiple devices")

  devices = jax.devices()[:-1]
  shuffle(devices)  # order should not matter for correctness
  f = self.pmap(lambda x: 3, devices=devices)
  x = jnp.arange(len(devices))
  with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
    ans = f(x)
  # self.assertEqual(count[0], 0)  # TODO(mattjj): don't compile for constants
  expected = np.repeat(3, len(devices))
  self.assertAllClose(ans, expected, check_dtypes=False)

  # Test that 'ans' was properly replicated across devices.
  self.assertEqual([b.device() for b in ans.device_buffers], devices)
def testPmapConstantError(self):
  """Over-subscribed constant pmap raises the logical-devices error."""
  device_count = jax.device_count()
  f = self.pmap(lambda x: 3)
  x = jnp.arange(device_count + 1)
  self.assertRaisesRegex(
      ValueError,
      (r"compiling computation that requires \d+ logical devices, "
       r"but only \d+ XLA devices are available .*"),
      lambda: f(x))

  # TODO(mattjj): test error message with explicit devices
  # f = pmap(lambda x: 3, devices=[jax.devices()[0]])
  # x = jnp.arange(2)
  # self.assertRaisesRegex(
  #     ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
  #     r"local devices are available.", lambda: f(x))
def testNestedPmapConstant(self):
  """Constants in nested pmaps are replicated with the expected sharding."""
  if jax.device_count() == 1:
    raise SkipTest("this test requires multiple devices")

  f = self.pmap(self.pmap(lambda x: 3))
  shape = (2, jax.device_count() // 2, 3)
  x = jnp.arange(prod(shape)).reshape(shape)
  with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
    ans = f(x)
  # self.assertEqual(count[0], 0)  # TODO(mattjj): don't compile for constants
  expected = 3 * np.ones(shape[:2])
  self.assertAllClose(ans, expected, check_dtypes=False)

  # Test that 'ans' was properly replicated across devices.
  expected_sharded = self.pmap(self.pmap(lambda x: x))(expected)
  self.assertEqual([b.device() for b in ans.device_buffers],
                   [b.device() for b in expected_sharded.device_buffers])

  # Mixed output: mapped value alongside the constant shares device placement.
  f = self.pmap(self.pmap(lambda x: (x, 3)))
  x_sharded, ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
  self.assertEqual([b.device() for b in ans.device_buffers],
                   [b.device() for b in x_sharded.device_buffers])
@unittest.skip("Nested pmaps with devices not yet implemented")
def testNestedPmapConstantDevices(self):
  """Nested constant pmap with an explicit device list (currently skipped)."""
  if jax.device_count() < 6:
    raise SkipTest("this test requires >= 6 devices")

  devices = jax.devices()[:-2]
  shuffle(devices)
  f = self.pmap(self.pmap(lambda x: 3), devices=devices)
  shape = (2, len(devices) // 2, 3)
  x = jnp.arange(prod(shape)).reshape(shape)
  with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
    ans = f(x)
  # self.assertEqual(count[0], 0)  # TODO(mattjj): don't compile for constants
  expected = 3 * np.ones(shape[:2])
  self.assertAllClose(ans, expected, check_dtypes=False)

  # Test that 'ans' was properly replicated across devices.
  expected_sharded = self.pmap(self.pmap(lambda x: x), devices=devices)(expected)
  self.assertEqual([b.device() for b in ans.device_buffers],
                   [b.device() for b in expected_sharded.device_buffers])
def testNestedPmapConstantError(self):
  """Over-subscribed nested constant pmap raises the logical-devices error."""
  f = self.pmap(self.pmap(lambda x: 3))
  shape = (2, jax.device_count() // 2 + 1, 3)
  x = jnp.arange(prod(shape)).reshape(shape)
  self.assertRaisesRegex(
      ValueError,
      (r"compiling computation that requires \d+ logical devices, "
       r"but only \d+ XLA devices are available .*"),
      lambda: f(x))

  # TODO(mattjj): check error message with explicit devices
  # if jax.device_count() > 1:
  #   f = pmap(pmap(lambda x: 3), devices=jax.devices()[:-1])
  #   shape = (2, jax.device_count() // 2, 3)
  #   x = jnp.arange(prod(shape)).reshape(shape)
  #   self.assertRaisesRegex(
  #       ValueError,
  #       (r"compiling computation that requires \d+ replicas, "
  #        r"but only \d+ XLA devices are available"),
  #       lambda: f(x))
def testCollectiveConstant(self):
  """psum of the constant 1 over axis 'i' yields the device count everywhere."""
  n_dev = jax.device_count()
  count_fn = self.pmap(lambda _: lax.psum(1, 'i'), 'i')
  out = count_fn(jnp.arange(n_dev))
  self.assertAllClose(out, np.repeat(n_dev, n_dev), check_dtypes=False)
def testCollectiveConstantNested(self):
  """psum(1, axis) inside nested pmaps counts the size of each named axis."""
  device_count = jax.device_count()

  @partial(self.pmap, axis_name='i')
  def f(x):
    @partial(self.pmap, axis_name='j')
    def g(y):
      a = lax.psum(1, 'i')        # size of the outer axis
      b = lax.psum(1, 'j')        # size of the inner axis
      c = lax.psum(1, ('i', 'j')) # product of both axis sizes
      return a, b, c

    return g(x)

  shape = (device_count, 1, 4)
  x = jnp.arange(prod(shape)).reshape(shape)
  a, b, c = f(x)

  self.assertEqual(a.shape, shape[:-1])
  self.assertEqual(b.shape, shape[:-1])
  self.assertEqual(c.shape, shape[:-1])

  self.assertEqual(a.ravel()[0], device_count)
  self.assertEqual(b.ravel()[0], 1)
  self.assertEqual(c.ravel()[0], device_count * 1)
def testAxisIndex(self):
  """axis_index('i') gives each device its position along the mapped axis."""
  n_dev = jax.device_count()
  add_index = self.pmap(lambda v: v + lax.axis_index('i'), 'i')
  out = add_index(jnp.ones(n_dev))
  self.assertAllClose(out, np.arange(n_dev) + 1, check_dtypes=False)
def testAxisIndexNestedPmap(self):
  """axis_index resolves the correct axis inside nested pmaps."""
  device_count = jax.device_count()
  if device_count < 4:
    raise SkipTest("test requires at least four devices")
  f = lambda axis: self.pmap(self.pmap(lambda x: x + lax.axis_index(axis), 'j'), 'i')
  x = jnp.ones((2, 2))
  # Indexing 'j' varies along columns; indexing 'i' is its transpose.
  expected_j = np.broadcast_to(1 + np.arange(2), (2, 2))
  self.assertAllClose(f('j')(x), expected_j, check_dtypes=False)
  self.assertAllClose(f('i')(x), expected_j.T, check_dtypes=False)
def testAxisIndexNd(self):
  """axis_index over a tuple of axes gives row-major indices in that order."""
  device_count = jax.device_count()
  if device_count < 4:
    raise SkipTest("test requires at least four devices")
  f = lambda axes: self.pmap(self.pmap(lambda x: x + lax.axis_index(axes), 'j'), 'i')
  x = jnp.ones((2, 2))
  # ('i', 'j') enumerates devices row-major; ('j', 'i') is the transpose.
  expected = 1 + np.arange(4).reshape((2, 2))
  self.assertAllClose(f(('i', 'j'))(x), expected, check_dtypes=False)
  self.assertAllClose(f(('j', 'i'))(x), expected.T, check_dtypes=False)
  def testAxisIndexInInitialStyle(self):
    """axis_index works inside initial-style control flow (lax.scan) under pmap."""
    @partial(self.pmap, axis_name='i')
    def f(x):
      def body(carry, i):
        return carry + i + lax.axis_index('i'), None
      return lax.scan(body, 0, x)[0]

    device_count = jax.device_count()
    shape = (device_count, 10)
    # Each device sums 10 ones plus 10 copies of its axis index.
    self.assertAllClose(f(jnp.ones(shape, dtype=int)),
                        (np.arange(device_count) + 1) * 10)
def testVmapOfPmap(self):
device_count = jax.device_count()
f0 = lambda x: x
f1 = self.pmap(f0, axis_name='i')
ax = np.random.randn(2, device_count, 50, 60)
bx = vmap(f1)(ax)
self.assertAllClose(ax, bx, check_dtypes=False)
  def testVmapOfPmap2(self):
    """vmap of a pmapped PRNG consumer with broadcast keys doesn't crash."""
    N_DEVICES = jax.device_count()
    keys = random.split(random.PRNGKey(1), 13)  # [13, 2]

    @self.pmap
    def g(key):
      _ = random.normal(key, ())
      return 0.

    @vmap
    def s(keys):
      # Replicate each key across the device axis before pmapping.
      keys = tree_util.tree_map(
          lambda x: jnp.broadcast_to(x, (N_DEVICES,) + x.shape),
          keys)
      return g(keys)

    ans = s(keys)  # doesn't crash
    self.assertEqual(ans.shape, (13, N_DEVICES))
  def testVmapOfPmap3(self):
    """vmap of a pmap with a broadcast (in_axes=None) argument matches lax.map.

    Regression test for https://github.com/google/jax/issues/3399.
    """
    device_count = jax.device_count()
    if device_count < 2:
      raise SkipTest("test requires at least two devices")

    def map_version(qs, pts):
      return jax.lax.map(lambda x: func(x, pts), qs)

    def vmap_version(qs, pts):
      return jax.vmap(func, in_axes=(0, None))(qs, pts)

    def func(q, pts):
      # q is broadcast (in_axes=None) across the pmapped device axis.
      q_from_pmap = self.pmap(lambda x, y: y, in_axes=(0, None))(pts, q)
      return q, q_from_pmap

    pts = jnp.ones(device_count)
    qs = jnp.asarray(((0,0), (3,3), (2,2)))

    with ignore_jit_of_pmap_warning():
      _, expected = map_version(qs, pts)
    _, ans = vmap_version(qs, pts)
    self.assertAllClose(ans, expected, check_dtypes=False)
def testVmapOfPmapNonLeadingAxis(self):
device_count = jax.device_count()
f0 = lambda x: x
f1 = self.pmap(f0, axis_name='i')
ax = np.random.randn(device_count, 2, 50, 60)
bx = vmap(f1, in_axes=2, out_axes=2)(ax)
self.assertAllClose(ax, bx, check_dtypes=False)
def testVmapOfPmapTuple(self):
device_count = jax.device_count()
f0 = lambda *x: x
f1 = self.pmap(f0, axis_name='i')
ax = np.random.randn(device_count, 2, 50, 60)
ay = np.random.randn(device_count, 30, 2)
az1 = np.random.randn(device_count, 20)
az2 = np.random.randn(2, device_count, 20)
bx, by, bz = vmap(f1, in_axes=(1, 2, (None, 0)), out_axes=(1, 2, 0))(ax, ay, (az1, az2))
self.assertAllClose(ax, bx, check_dtypes=False)
self.assertAllClose(ay, by, check_dtypes=False)
bz1, bz2 = bz
expected_bz1 = np.broadcast_to(az1, (2,) + az1.shape)
self.assertAllClose(expected_bz1, bz1, check_dtypes=False)
self.assertAllClose(bz2, bz2, check_dtypes=False)
  @ignore_slow_all_to_all_warning()
  def testPswapaxes(self):
    """lax.pswapaxes swaps the mapped axis with a local axis, like np.swapaxes."""
    device_count = jax.device_count()
    shape = (device_count, 3, device_count, 5)
    x = np.arange(prod(shape)).reshape(shape)
    ans = self.pmap(lambda x: lax.pswapaxes(x, 'i', 1), axis_name='i')(x)
    # Swapping the mapped axis (global axis 0) with local axis 1 corresponds
    # to swapping global axes 0 and 2 of the unmapped array.
    expected = np.swapaxes(x, 0, 2)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @ignore_slow_all_to_all_warning()
  def testGradOfPswapaxes(self):
    """Gradient through pswapaxes routes cotangents back to the right devices."""
    device_count = jax.device_count()
    shape = (device_count, 1, device_count)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    w = np.arange(device_count, dtype=np.float32)

    @partial(self.pmap, axis_name='i')
    def f(x, w):
      g = lambda x: jnp.sum(lax.pswapaxes(x, 'i', 1) * w)
      return grad(g)(x)

    ans = f(x, w)
    # The transpose of pswapaxes swaps back, so each device's gradient is its
    # own weight broadcast over the local slice.
    expected = np.tile(w, reps=device_count).reshape(shape)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @ignore_slow_all_to_all_warning()
  def testAllToAllReplicaGroups(self):
    """all_to_all with axis_index_groups shuffles only within each group.

    If num_devices = 4, these would be the inputs/outputs:
    input = [[0, 1], [2, 3], [4, 5], [6, 7]]
    axis_index_groups = [[0, 2], [1, 3]]
    output = [[0, 4], [2, 6], [1, 5], [3, 7]]

    This is essentially like splitting the number of rows in the input in two
    groups of rows, and swapping the two inner axes (axis=1 and axis=2), which
    is exactly what the test case checks.
    """
    device_count = jax.device_count()
    if device_count % 2 != 0:
      raise SkipTest('test requires an even number of devices')
    shape = (device_count, device_count // 2)
    x = np.arange(prod(shape)).reshape(shape)
    # Groups pair device k with device k + device_count//2 (column-major split).
    axis_index_groups = np.arange(device_count, dtype=np.int32)
    axis_index_groups = axis_index_groups.reshape((device_count // 2, 2)).T
    axis_index_groups = axis_index_groups.tolist()

    @partial(self.pmap, axis_name='i')
    def fn(x):
      return lax.all_to_all(x, 'i', 0, 0, axis_index_groups=axis_index_groups)

    expected = np.swapaxes(
        x.reshape((device_count // 2, 2, device_count // 2)),
        0, 2).reshape(shape)
    self.assertAllClose(fn(x), expected, check_dtypes=False)
  @ignore_slow_all_to_all_warning()
  def testGradOfAllToAllReplicaGroups(self):
    """Gradient through grouped all_to_all permutes cotangents within groups."""
    device_count = jax.device_count()
    if device_count % 2 != 0:
      raise SkipTest('test requires an even number of devices')
    shape = (device_count, device_count // 2, 1)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    w = np.arange(device_count, dtype=np.float32)
    # Two contiguous groups of device_count // 2 devices each.
    axis_index_groups = np.arange(device_count, dtype=np.int32)
    axis_index_groups = axis_index_groups.reshape((2, device_count // 2))
    axis_index_groups = axis_index_groups.tolist()

    @partial(self.pmap, axis_name='i')
    def fn(x, w):
      g = lambda x: jnp.sum(lax.all_to_all(x, 'i', 0, 1, axis_index_groups=axis_index_groups) * w)
      return grad(g)(x)

    # The transpose of all_to_all is all_to_all with split/concat swapped,
    # which within each group transposes the weight layout.
    expected = np.ones_like(x) * w[:, np.newaxis, np.newaxis]
    expected = np.swapaxes(
        expected.reshape((2, device_count // 2, device_count // 2)),
        1, 2).reshape(shape)
    self.assertAllClose(fn(x, w), expected, check_dtypes=False)
  def testReshardInput(self):
    """pmap transparently reshards an input whose sharding doesn't match."""
    if jax.device_count() < 6:
      raise SkipTest("testReshardInput requires 6 devices")
    # Manually construct a ShardedDeviceArray with the wrong sharding for the
    # subsequent pmap
    shard_shape = (3,2)
    shard = jnp.arange(prod(shard_shape)).reshape(shard_shape)
    bufs = pxla.device_put(shard, jax.devices()[:4], replicate=True)
    aval = ShapedArray((6,4), shard.dtype)
    # 2x2 chunking over both array axes, mapped onto a 4-device mesh.
    sharding_spec = pxla.ShardingSpec(
        sharding=map(pxla.Chunked, ([2], [2])),
        mesh_mapping=map(pxla.ShardedAxis, (0, 1)))
    arr = pxla.make_sharded_device_array(aval, sharding_spec, bufs)

    r = self.pmap(lambda x: x + 1)(arr)
    self.assertAllClose(r, arr + 1)
    # After resharding for a 6-way pmap, the result has one buffer per device.
    self.assertEqual(len(r.device_buffers), 6)
@ignore_xmap_warning()
def testSoftPmapBatchMatmul(self):
n = 4 * jax.device_count()
xs = np.arange(n * 2 * 3).reshape(n, 2, 3)
ys = np.arange(n * 3 * 4).reshape(n, 3, 4)
ans = soft_pmap(jnp.dot, 'i')(xs, ys)
expected = np.einsum('nij,njk->nik', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
@ignore_xmap_warning()
def testSoftPmapBatchMatmulJit(self):
n = 4 * jax.device_count()
xs = np.arange(n * 2 * 3).reshape(n, 2, 3)
ys = np.arange(n * 3 * 4).reshape(n, 3, 4)
ans = soft_pmap(jit(jnp.dot), 'i')(xs, ys)
expected = np.einsum('nij,njk->nik', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
@ignore_xmap_warning()
def testSoftPmapPsumConstant(self):
n = 4 * jax.device_count()
def f(_):
return lax.psum(1, 'i')
ans = soft_pmap(f, 'i')(jnp.ones(n))
expected = n * np.ones(n)
self.assertAllClose(ans, expected, check_dtypes=False)
@ignore_xmap_warning()
def testSoftPmapPsum(self):
n = 4 * jax.device_count()
def f(x):
return x / lax.psum(x, 'i')
ans = soft_pmap(f, 'i')(jnp.ones(n))
expected = np.ones(n) / n
self.assertAllClose(ans, expected, check_dtypes=False)
@ignore_xmap_warning()
def testSoftPmapAxisIndex(self):
n = 4 * jax.device_count()
def f(x):
return x * lax.axis_index('i')
ans = soft_pmap(f, 'i')(2 * jnp.ones(n))
expected = 2 * np.arange(n)
self.assertAllClose(ans, expected, check_dtypes=False)
@ignore_xmap_warning()
def testSoftPmapOfJit(self):
n = 4 * jax.device_count()
def f(x):
return 3 * x
ans = soft_pmap(jit(f), 'i')(np.arange(n))
expected = 3 * np.arange(n)
self.assertAllClose(ans, expected, check_dtypes=False)
  @ignore_xmap_warning()
  @unittest.skip("not implemented")  # TODO(mattjj): re-implement
  def testSoftPmapNested(self):
    """Nested soft_pmaps: inner/outer axis indices combine into a grid."""
    n = 4 * jax.device_count()

    @partial(soft_pmap, axis_name='i')
    @partial(soft_pmap, axis_name='j')
    def f(x):
      i_size = lax.psum(1, 'i')
      return x + lax.axis_index('i') + i_size * lax.axis_index('j')

    ans = f(jnp.zeros((n, n)))
    # Column-major enumeration of the (i, j) grid.
    expected = np.arange(n ** 2).reshape(n, n).T
    self.assertAllClose(ans, expected, check_dtypes=False)
  @ignore_xmap_warning()
  @unittest.skip("not implemented")  # TODO(mattjj): re-implement
  def testGradOfSoftPmap(self):
    """grad through soft_pmap: d/dx (x * axis_index) is the per-row index."""
    n = 4 * jax.device_count()

    @partial(soft_pmap, axis_name='i')
    def f(x):
      return x * lax.axis_index('i')

    ans = grad(lambda x: jnp.sum(f(x)))(jnp.zeros((n, n)))
    expected = np.repeat(np.arange(n)[:, None], n, axis=1)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @ignore_xmap_warning()
  def testSoftPmapDevicePersistence(self):
    """soft_pmap outputs stay on device and can be fed back in without a host copy."""
    device_count = jax.device_count()
    shape = (2 * 2 * device_count, 2, 3)

    # check that we can maintain device persistence across calls
    x = np.arange(prod(shape)).reshape(shape)
    x = soft_pmap(lambda x: x)(x)
    self.assertIsInstance(x, pxla.ShardedDeviceArray)
    # Poison the host-side cache: if the next call tried to transfer via the
    # host, coercing NaN to ndarray metadata would fail.
    x._npy_value = np.float32(np.nan)  # can't be coerced to ndarray for xfer
    x = soft_pmap(lambda x: x)(x)  # doesn't crash
    self.assertIsInstance(x, pxla.ShardedDeviceArray)
  @unittest.skip("the underlying code here is broken")  # TODO(mattjj)
  def testSoftPmapAllToAll(self):
    """all_to_all under soft_pmap should transpose the square input."""
    n = 4 * jax.device_count()
    def f(x):
      return lax.all_to_all(x, 'i', 0, 0)
    ans = soft_pmap(f, 'i')(jnp.arange(n ** 2).reshape(n, n))
    expected = np.arange(n ** 2).reshape(n, n).T
    self.assertAllClose(ans, expected, check_dtypes=False)
def testShardedDeviceArrayBlockUntilReady(self):
x = np.arange(jax.device_count())
x = self.pmap(lambda x: x)(x)
x.block_until_ready() # doesn't crash
@ignore_jit_of_pmap_warning()
def testJitPmapComposition(self):
f = lambda x: x - lax.psum(x, 'i')
shape = (jax.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
expected = x - np.sum(x, 0)
ans = jit(self.pmap(f, 'i'))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = self.pmap(jit(f), 'i')(x)
self.assertAllClose(ans, expected, check_dtypes=False)
  def testCompositionWithJitTwice(self):
    """pmap closing over a jit-traced intermediate inside nested jits works."""
    @jit
    def f(x):
      y = 2 * x

      @jit
      def g(z):
        # y is a closed-over tracer from the outer jit.
        return self.pmap(lambda x: x * y)(z)

      return g(x)

    f(np.arange(1.).reshape((1, 1)))  # doesn't crash
  @ignore_jit_of_pmap_warning()
  def testIssue1065(self):
    """fori_loop over a jitted pmapped step doesn't crash.

    Regression test from https://github.com/google/jax/issues/1065.
    """
    device_count = jax.device_count()
    def multi_step_pmap(state, count):
      @partial(self.pmap, axis_name='x')
      @jit
      def exchange_and_multi_step(state):
        return state

      @jit
      def time_evolution(state):
        return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state)

      return time_evolution(state)

    multi_step_pmap(jnp.zeros((device_count,)), count=1)
def testShardedDeviceArrayGetItem(self):
f = lambda x: 2 * x
f = self.pmap(f, axis_name='i')
shape = (jax.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
y = f(x)
self.assertIsInstance(y, jnp.ndarray)
self.assertIsInstance(y, pxla.ShardedDeviceArray)
z = y[0] # doesn't crash
self.assertAllClose(z, 2 * x[0], check_dtypes=False)
  # TODO(mattjj): this fails with multiple devices (unless we add a jit)
  # because we assume eager ops (like scan here) can't require more than 1
  # replica.
  @unittest.skip("need eager multi-replica support")
  def testPostProcessMap(self):
    """Repeated pmapped matrix-vector products inside lax.scan match A^5 b.

    Test came from https://github.com/google/jax/issues/1369.
    """
    nrep = jax.device_count()

    def pmvm(a, b):
      # Split rows across devices, multiply, then flatten back.
      a = a.reshape((nrep, -1, a.shape[1]))
      func = self.pmap(lambda z: jnp.dot(z, b))
      return func(a).reshape(b.shape)

    n = nrep * 2
    rng = np.random.RandomState(0)
    a = rng.randn(n, n)
    b = rng.randn(n)

    iters = jnp.arange(5)
    def body(carry, i):
      return pmvm(a, carry), i
    ans, _ = lax.scan(body, b, iters)

    expected = np.linalg.matrix_power(a, 5).dot(b)
    self.assertAllClose(ans, expected, check_dtypes=False)
def testManyArgs(self):
@self.pmap
def f(args_list):
return sum(args_list)
vals = list(range(500))
ndevices = jax.device_count()
self.assertAllClose(f(jnp.array([vals] * ndevices)),
jnp.array([sum(vals)] * ndevices))
  def testPostProcessMap2(self):
    """vmap of a pmapped distributed matvec matches the einsum reference.

    Code from https://github.com/google/jax/issues/2787.
    """
    def vv(x, y):
      """Vector-vector multiply"""
      return jnp.dot(x, y)

    def distributed_matrix_vector(x, y):
      """Matrix vector multiply. First batch it and then row by row"""
      fv = lambda z: lax.map(lambda j: vv(j, y), z)
      res = self.pmap(fv)(x.reshape((jax.device_count(), -1) + tuple(x.shape[1:])))
      res = res.reshape(res.shape[0] * res.shape[1], *res.shape[2:])
      return res

    key = random.PRNGKey(1)
    x = random.normal(key, (80, 50))
    batched_mvm = vmap(lambda b: distributed_matrix_vector(x, b), in_axes=0)
    y = random.normal(key, (10, 50, 1))
    result = batched_mvm(y)
    expected = jnp.einsum('ij,njk->nik', x, y)
    # TPU matmuls are lower precision, so loosen the tolerance there.
    tol = 1e-1 if jtu.device_under_test() == "tpu" else 1e-3
    self.assertAllClose(result, expected, check_dtypes=False, atol=tol, rtol=tol)
  def testAxisIndexRemat(self):
    """axis_index works under jax.remat inside pmap.

    Regression test for https://github.com/google/jax/issues/2716.
    """
    n = len(jax.devices())

    def f(key):
      # Fold the device index into the key so each device draws differently.
      key = random.fold_in(key, jax.lax.axis_index('i'))
      return random.bernoulli(key, p=0.5)

    keys = random.split(random.PRNGKey(0), n)
    self.pmap(jax.remat(f), axis_name='i')(keys)
  def testPmapMapVmapCombinations(self):
    """vmap/map over an (optionally pmapped) matvec all agree.

    Regression test for https://github.com/google/jax/issues/2822.
    """
    def vv(x, y):
      """Vector-vector multiply"""
      return jnp.dot(x, y)

    def matrix_vector(x, y, parallel=True):
      """Matrix vector multiply. First batch it and then row by row"""
      fv = lambda z: lax.map(lambda j: vv(j, y), z)
      if parallel:
        # split leading axis in two
        new_x = x.reshape((jax.device_count(), -1, *x.shape[1:]))
        # apply map
        new_res = self.pmap(fv)(new_x)
        # reshape back out
        res = new_res.reshape(x.shape[0], *new_res.shape[2:])
      else:
        res = fv(x)
      return res

    x = random.normal(random.PRNGKey(1), (80, 5))
    y = random.normal(random.PRNGKey(1), (10, 5))

    # All four strategies should compute the same batched matvec.
    result1 = vmap(lambda b: matrix_vector(x, b, True))(y)       # vmap + pmap
    result2 = lax.map(lambda b: matrix_vector(x, b, False), y)   # map + map
    with ignore_jit_of_pmap_warning():
      result3 = lax.map(lambda b: matrix_vector(x, b, True), y)  # map + pmap
    result4 = jnp.stack([matrix_vector(x, b, False) for b in y]) # none + map

    self.assertAllClose(result1, result2, check_dtypes=False, atol=1e-3, rtol=1e-3)
    self.assertAllClose(result1, result3, check_dtypes=False, atol=1e-3, rtol=1e-3)
    self.assertAllClose(result1, result4, check_dtypes=False, atol=1e-3, rtol=1e-3)
def testPmapAxisNameError(self):
# https://github.com/google/jax/issues/3120
a = np.arange(4)[np.newaxis,:]
def test(x):
return jax.lax.psum(x, axis_name='batch')
with self.assertRaisesRegex(NameError, "unbound axis name: batch"):
self.pmap(test)(a)
  def testPsumOnBooleanDtype(self):
    """psum/pmean on bool inputs promote to numeric results.

    Regression test for https://github.com/google/jax/issues/3123.
    """
    n = jax.device_count()
    if n > 1:
      x = jnp.array([True, False])

      out = self.pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1, 1])

      out = self.pmap(lambda x: jax.lax.pmean(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1/2, 1/2])
    else:
      # Single-device fallback: axis size 1, so psum == pmean == the input.
      x = jnp.array([True])

      out = self.pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1])

      out = self.pmap(lambda x: jax.lax.pmean(x, 'i'), 'i')(x)
      self.assertEqual(list(out), [1])
  def testPsumWithNoAxisDoesntLeakFunctions(self):
    """The pmap cache must not keep the wrapped function object alive."""
    x = jnp.ones((1, 1024), dtype=np.float32)
    f = lambda _: x
    w = weakref.ref(f)
    g = self.pmap(f)
    g(np.ones((1,), dtype=np.float32)).block_until_ready()
    del f, g
    gc.collect()
    # 'f' should not be alive at this point; in particular the pmap cache must
    # not keep it alive.
    self.assertTrue(w() is None)
  def testJitOfPmapWarningMessage(self):
    """jit of a pmapped function emits a warning naming the jitted function."""
    device_count = jax.device_count()
    if device_count == 1:
      raise SkipTest("test requires at least two devices")

    def foo(x): return x

    with warnings.catch_warnings(record=True) as w:
      warnings.simplefilter("always")
      jit(self.pmap(foo))(jnp.arange(device_count))

    self.assertGreaterEqual(len(w), 1)
    # The warning should mention the jitted function by name.
    self.assertIn("The jitted function foo includes a pmap",
                  str(w[-1].message))
  def testPsumZeroCotangents(self):
    """Nested grad through psum handles symbolic-zero cotangents.

    Regression test for https://github.com/google/jax/issues/3651.
    """
    def loss(params, meta_params):
      (net, mpo) = params
      return meta_params * mpo * net

    def inner(meta_params, params):
      grads = jax.grad(loss)(params, meta_params)
      grads = lax.psum(grads, axis_name="i")
      net_grads, mpo_grads = grads
      net = params[0] + net_grads
      mpo = params[1]
      return mpo * net

    def outer(params):
      meta_params = jnp.array(4.0)
      return jax.grad(inner)(meta_params, params)

    params = (jnp.array([2.0]), jnp.array([3.0]))
    self.pmap(outer, axis_name='i')(params)  # doesn't crash

    f = self.pmap(outer, axis_name='i')
    jtu.check_grads(f, (params,), 2, ["fwd", "rev"], 1e-3, 1e-3)
  @ignore_jit_of_pmap_warning()
  def test_issue_1062(self):
    """jit of pmap of jitted fori_loop doesn't crash.

    Code from https://github.com/google/jax/issues/1062 @shoyer.
    This tests, among other things, whether ShardedDeviceTuple constants work.
    """
    device_count = jax.device_count()

    @jit
    def multi_step(state, count):
      return lax.fori_loop(0, count, lambda i, s: s, state)

    @jit
    def multi_step_pmap(state, count=2):
      @partial(self.pmap, axis_name='x')
      def pmapped_multi_step(state):
        return multi_step(state, count)

      return pmapped_multi_step(state)

    u = np.ones((device_count, 100))
    multi_step_pmap(u)  # doesn't crash
@jtu.skip_on_devices("cpu")
def test_replicate_backend(self):
# TODO(skye): fix backend caching so we always have multiple CPUs available
if jax.device_count("cpu") < 4:
self.skipTest("test requires 4 CPU device")
# https://github.com/google/jax/issues/4223
def fn(indices):
return jnp.equal(indices, jnp.arange(3)).astype(jnp.float32)
mapped_fn = self.pmap(fn, axis_name='i', backend='cpu')
mapped_fn = self.pmap(mapped_fn, axis_name='j', backend='cpu')
indices = np.array([[[2], [1]], [[0], [0]]])
mapped_fn(indices) # doesn't crash
@ignore_xmap_warning()
def testPdotBasic(self):
num_devices = jax.device_count()
def f(x, y):
return lax.pdot(x, y, 'i')
x = jnp.arange(num_devices * 3).reshape(num_devices, 3)
y = jnp.arange(num_devices * 5).reshape(num_devices, 5)
z = self.pmap(f, axis_name='i', out_axes=None)(x, y)
self.assertAllClose(z, jnp.dot(x.T, y))
  @parameterized.named_parameters(
      {"testcase_name": "_shape={}_axis={}_collective={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis, collective.__name__.replace(" ", "")),
       "shape": shape, "dtype": dtype, "axis": axis,
       "collective": collective, "bulk_op": bulk_op}
      for collective, bulk_op in [
          (parallel.pargmax, jnp.argmax),
          (parallel.pargmin, jnp.argmin)
      ]
      for dtype in [np.float32, np.int32]
      for shape in [(4,), (2, 2), (2, 4), (4, 2)]
      for axis in range(len(shape))
  )
  def testArgAllReduce(self, shape, dtype, axis, collective, bulk_op):
    """pargmax/pargmin over the mapped axis agree with argmax/argmin."""
    if jax.device_count() < shape[axis]:
      raise SkipTest(f"test requires at least {shape[axis]} devices")
    if (jtu.device_under_test() == 'cpu' and
        np.issubdtype(dtype, np.floating) and
        len(shape) > 1):
      raise SkipTest("skipped on cpu due to strange failures")  # TODO(mattjj)

    rng = jtu.rand_default(self.rng())
    x = rng(shape, dtype)
    # Map over `axis`; the collective reduces over it, so out_axes=None.
    ans = self.pmap(lambda x: collective(x, 'i'), in_axes=axis, out_axes=None,
                    axis_name='i')(x)
    expected = bulk_op(x, axis=axis)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": "_dtype={}".format(
          jtu.format_shape_dtype_string((), dtype)),
       "dtype": dtype}
      for dtype in [np.float32, np.int32]
  )
  def testPmapDtype(self, dtype):
    """psum preserves the operand dtype (no unwanted promotion)."""
    # Regression test for https://github.com/google/jax/issues/6022
    @partial(self.pmap, axis_name='i')
    def func(_):
      return jax.lax.psum(dtype(0), axis_name='i')
    unused_arg = jnp.arange(jax.device_count())
    out_dtype = func(unused_arg).dtype
    self.assertEqual(out_dtype, dtype)
  def test_num_replicas_with_switch(self):
    """lax.cond whose branches are pmapped functions doesn't crash.

    Regression test for https://github.com/google/jax/issues/7411.
    """
    def identity(x):
      return x

    def cond_of_pmap(x):
      y = lax.cond(True, jax.pmap(identity), jax.pmap(identity), x)
      return y

    with ignore_jit_of_pmap_warning():
      cond_of_pmap(jnp.zeros((jax.device_count(), 2)))
  def test_static_argnum_on_method(self):
    """static_broadcasted_argnums can mark `self` on a pmapped method."""
    class A:

      # argnum 0 is the (hashable) instance itself.
      @partial(self.pmap, static_broadcasted_argnums=(0,))
      def my_func_pmap(self, x):
        return x + 2

    A().my_func_pmap(jnp.asarray([3] * jax.device_count()))
def test_pmap_error_on_non_hashable_static_argument(self):
f = lambda x, y: x + 3
pmapped_f = self.pmap(f, static_broadcasted_argnums=(1,))
inputs = np.asarray([1] * jax.device_count())
with self.assertRaisesRegex(
ValueError, "Non-hashable static arguments are not supported.*"):
pmapped_f(inputs, np.asarray(1))
  @parameterized.named_parameters(
      {"testcase_name": f"_axis_size={axis_size}", "axis_size": axis_size}
      for axis_size in [1, 2])
  def test_grad_of_pmap_compilation_caching(self, axis_size):
    """vjp of a pmapped function compiles once for fwd and once for bwd."""
    if len(jax.local_devices()) < axis_size:
      raise SkipTest("too few devices for test")

    @jax.pmap
    def f(x):
      return jnp.sin(x)

    x = jnp.ones(axis_size)
    f(x)  # warm-up any dispatching compilations

    # First vjp: expect exactly two fresh compilations.
    with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
      _, f_bwd  = jax.vjp(f, x)
      _  = f_bwd(x)
    self.assertEqual(count[0], 2)  # one for fwd, one for bwd

    # Second vjp: everything should hit the compilation cache.
    with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
      _  = jax.vjp(f, x)
      _  = f_bwd(x)
    self.assertEqual(count[0], 0)  # cache hits on fwd and bwd
  @unittest.skipIf(jax._src.lib._xla_extension_version < 44,
                   "XLA extension too old.")
  def testSizeOverflow(self):
    """`.size` of a >2**31-element pmap output is an exact Python int."""
    x = jnp.arange(1)
    x = self.pmap(lambda _: jnp.ones([8, 267736, 1024], dtype=jnp.int8))(x)
    # 8 * 267736 * 1024 > 2**31, so this would overflow a 32-bit size field.
    self.assertEqual(x.size, 8 * 267736 * 1024)
    self.assertEqual(type(x.size), int)
class CppPmapTest(PythonPmapTest):
  """Re-runs the whole PythonPmapTest suite against the C++ pmap fast path."""

  @property
  def pmap(self):
    # Every inherited test calls self.pmap, so overriding this property is
    # enough to exercise the C++ dispatch implementation.
    return src_api._cpp_pmap
class VmapOfPmapTest(jtu.JaxTestCase):
  """Exhaustively samples vmap-of-pmap axis combinations against a reference."""

  # TODO(apaszke)
  @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
       "testcase_name": f"{shapes}_{vmap_in_axes}_{vmap_out_axes}_{pmap_in_axes}_{pmap_out_axes}",
       "shapes": shapes,
       "vmap_in_axes": vmap_in_axes, "vmap_out_axes": vmap_out_axes,
       "pmap_in_axes": pmap_in_axes, "pmap_out_axes": pmap_out_axes
    } for arg_shapes in s(compatible_shapes)
      for num_args in s(range(1, 4))
      for shapes in s(list(it.combinations_with_replacement(arg_shapes, num_args)))
      for vmap_in_axes in s(all_bdims(*shapes, pmap=False))
      for pmap_in_axes in s(all_bdims(*shapes, pmap=True))
      for vmap_out_axes in s(out_bdims(shapes[0], False))
      for pmap_out_axes in s(out_bdims(shapes[0], True))
  )))
  def testVmapOfPmap(self, shapes, vmap_in_axes, pmap_in_axes, vmap_out_axes, pmap_out_axes):
    """vmap(pmap(sum)) with sampled in/out axes matches a manual slice loop."""
    vmapped_size = 3
    pmapped_size = jax.device_count()

    rng = jtu.rand_default(self.rng())

    def fun(*args):
      return sum(args)

    # Insert the pmap batch dim first, then the vmap batch dim around it.
    final_shapes = map(partial(add_bdim, vmapped_size), vmap_in_axes,
                       map(partial(add_bdim, pmapped_size), pmap_in_axes, shapes))

    def args_slice(vi, pi):
      # Slice out one (vmap, pmap) instance of each argument.
      return args_slicer(args_slicer(args, vmap_in_axes)(vi), pmap_in_axes)(pi)

    args = [rng(shape, jnp.float32) for shape in final_shapes]
    ans = vmap(pmap(fun, in_axes=pmap_in_axes, out_axes=pmap_out_axes),
               in_axes=vmap_in_axes,
               out_axes=vmap_out_axes)(*args)
    # Reference: apply fun per (vi, pi) slice and restack along the out axes.
    expected = np.stack(
      [np.stack([fun(*args_slice(vi, pi)) for pi in range(pmapped_size)], axis=pmap_out_axes)
       for vi in range(vmapped_size)],
      axis=vmap_out_axes)
    self.assertAllClose(ans, expected)
class VmapPmapCollectivesTest(jtu.JaxTestCase):
  """Checks that collectives behave identically under pmap and vmap."""

  @parameterized.named_parameters(
      {"testcase_name": "_collective={}".format(collective.__name__).replace(" ", ""),
       "collective": collective}
      for collective in [lax.psum, lax.pmean, lax.pmax, lax.pmin])
  def testCollectivesWithVmap(self, collective):
    """Reductions over ('i','j') agree for all pmap/vmap nestings."""
    def f(map1, map2):
      @partial(map1, axis_name='i')
      @partial(map2, axis_name='j')
      def f(x, y):
        return x + collective(x.dot(y), ('i', 'j'))
      return f

    if jax.device_count() < 4:
      raise SkipTest("test requires at least four devices")
    x = jnp.ones((2, 2, 64, 64))
    y = f(jax.pmap, jax.pmap)(x, x)
    self.assertAllClose(f(jax.vmap, jax.vmap)(x, x), y)
    self.assertAllClose(f(jax.pmap, jax.vmap)(x, x), y)
    self.assertAllClose(f(jax.vmap, jax.pmap)(x, x), y)

  @parameterized.named_parameters(
      {"testcase_name": "_collective={}".format(collective.__name__).replace(" ", ""),
       "collective": collective}
      for collective in [lax.psum, lax.pmean, lax.pmax, lax.pmin])
  def testCollectivesWithVmap2(self, collective):
    """Same as testCollectivesWithVmap but with non-constant, larger inputs."""
    def f(map1, map2):
      @partial(map1, axis_name='i')
      @partial(map2, axis_name='j')
      def f(x, y):
        return x + collective(x.dot(y), ('i', 'j'))
      return f

    if jax.device_count() < 8:
      raise SkipTest("test requires at least eight devices")
    x = jnp.arange(4*2*64*64).reshape(4, 2, 64, 64)
    y = f(jax.pmap, jax.pmap)(x, x)
    self.assertAllClose(f(jax.vmap, jax.vmap)(x, x), y)
    self.assertAllClose(f(jax.pmap, jax.vmap)(x, x), y)
    self.assertAllClose(f(jax.vmap, jax.pmap)(x, x), y)

  def testPPermuteWithVmap(self):
    """ppermute under pmap-of-vmap matches pmap-of-pmap."""
    perm = [(0, 1), (1, 0)]

    def f(map2):
      @partial(jax.pmap, axis_name='i')
      @partial(map2)
      def f(x, y):
        return x + jax.lax.ppermute(x.dot(y), 'i', perm)
      return f

    if jax.device_count() < 4:
      raise SkipTest("test requires at least four devices")
    x = jnp.ones((2, 2, 64, 64))
    self.assertAllClose(f(jax.pmap)(x, x), f(jax.vmap)(x, x))

  def testPPermuteAgreesWithVmap(self):
    """A cyclic ppermute gives identical results under pmap and vmap."""
    if jax.device_count() < 3:
      raise SkipTest("test requires at least three devices")

    def f(x):
      return lax.ppermute(x, 'i', [[1, 0], [2, 1], [0, 2]])

    xs = jnp.arange(3) * 10
    ys = jax.pmap(f, axis_name='i')(xs)
    zs = jax.vmap(f, axis_name='i')(xs)
    self.assertAllClose(ys, zs, check_dtypes=True)

  @parameterized.named_parameters(
      {"testcase_name": f"_split={split_axis}_concat={concat_axis}_vmap={vmap_axis}",
       "split_axis": split_axis, "concat_axis": concat_axis, "vmap_axis": vmap_axis}
      for split_axis, concat_axis, vmap_axis in it.product(range(3), range(3), range(4)))
  @ignore_slow_all_to_all_warning()
  def testAllToAllInVmap(self, split_axis, concat_axis, vmap_axis):
    """all_to_all batched by vmap matches a hand-built moveaxis reference."""
    def f(x):
      return lax.all_to_all(x, 'i', split_axis=split_axis, concat_axis=concat_axis)

    def adj(axis, hidden_axes):
      # Translate a "logical" axis position into the real tensor layout by
      # skipping over the hidden (mapped) axes that precede it.
      for hax in sorted(hidden_axes):
        if hax <= axis:
          axis += 1
      return axis

    def reference(x, split_axis, concat_axis, vmap_axis):
      pmap_axis = 0
      vmap_axis = adj(vmap_axis, [pmap_axis])
      ref = x

      # Step 1.
      # Adjust the split axis to the real tensor layout and move it to
      # position 1. Since pmap_axis is always 0 we don't have to adjust it,
      # but we do have to adjust vmap_axis.
      split_axis = adj(split_axis, [pmap_axis, vmap_axis])
      ref = jnp.moveaxis(ref, split_axis, pmap_axis + 1)
      vmap_axis = vmap_axis + (0 if split_axis < vmap_axis else 1)
      split_axis = pmap_axis + 1  # split_axes == 1

      # Step 2.
      # Now, we move pmap_axis to the position indicated by concat_axis.
      concat_axis = adj(concat_axis, [pmap_axis, split_axis, vmap_axis]) - 1
      ref = jnp.moveaxis(ref, pmap_axis, concat_axis)
      pmap_axis = 0
      vmap_axis = vmap_axis - (1 if concat_axis >= vmap_axis else 0)
      del split_axis, concat_axis

      # Step 3. vmap_axis always ends in position 1, since out_axes=0.
      ref = jnp.moveaxis(ref, vmap_axis, 1)
      return ref

    def verify_ref():
      # Both the reference and the real implementation of all_to_all batching involve
      # some pretty complicated axis arithmetic, so it would be good to verify that it's
      # not the case that the test passes because they're both incorrect. Fortunately, it
      # is quite easy to write out the shape function for this code, and we know
      # that it should be equivalent to a bunch of transposes, so the code below verifies
      # that the reference puts the right dimensions in the right places. Note that we
      # can't do the same comparison on f, since all_to_all wouldn't allow us to swap axes of
      # different sizes.
      start_shape = [2, 3, 4, 5, 6]
      instance_shape = start_shape.copy()
      pmap_dim_id = instance_shape.pop(0)
      vmap_dim_id = instance_shape.pop(vmap_axis)
      split_axis_id = instance_shape.pop(split_axis)
      instance_shape.insert(concat_axis, pmap_dim_id)
      expected_shape = (split_axis_id, vmap_dim_id, *instance_shape)

      x = np.empty(start_shape)
      self.assertEqual(reference(x, split_axis, concat_axis, vmap_axis).shape,
                       expected_shape)

    verify_ref()

    shape = (jax.device_count(),) * 5
    x = jnp.arange(np.prod(shape)).reshape(shape)
    self.assertAllClose(pmap(vmap(f, in_axes=vmap_axis), axis_name='i')(x),
                        reference(x, split_axis, concat_axis, vmap_axis))

  @parameterized.named_parameters(
      {"testcase_name": f"_split={split_axis}_concat={concat_axis}",
       "split_axis": split_axis, "concat_axis": concat_axis}
      for split_axis, concat_axis in it.product(range(3), range(3)))
  @ignore_slow_all_to_all_warning()
  def testAllToAllVsVmap(self, split_axis, concat_axis):
    """all_to_all gives identical results under pmap and vmap."""
    def f(x):
      return lax.all_to_all(x, 'i', split_axis=split_axis, concat_axis=concat_axis)

    shape = (jax.device_count(),) * 4
    x = jnp.arange(np.prod(shape)).reshape(shape)
    self.assertAllClose(pmap(f, axis_name='i')(x),
                        vmap(f, axis_name='i')(x))

  @parameterized.named_parameters(
      {"testcase_name": f"_split={split_axis}_concat={concat_axis}_axes={''.join(axes)}",
       "axes": axes, "split_axis": split_axis, "concat_axis": concat_axis}
      for axes, split_axis, concat_axis
      in it.product([('i', 'j'), ('j', 'i')], range(3), range(3)))
  @ignore_slow_all_to_all_warning()
  @unittest.skip("multi-axis all_to_all broken after #4835")  # TODO(mattjj,apaszke)
  def testAllToAllMultipleAxesVsVmap(self, axes, split_axis, concat_axis):
    """Multi-axis all_to_all should agree between nested pmap and nested vmap."""
    if jax.device_count() < 4:
      raise SkipTest("test requires at least four devices")

    def f(x):
      return lax.all_to_all(x, axes, split_axis=split_axis, concat_axis=concat_axis)

    shape = (2, 2, 4, 4, 4)
    x = jnp.arange(np.prod(shape)).reshape(shape)
    self.assertAllClose(pmap(pmap(f, axis_name='j'), axis_name='i')(x),
                        vmap(vmap(f, axis_name='j'), axis_name='i')(x))

  def testAllGatherWithVmap(self):
    """all_gather under pmap-of-vmap matches pmap-of-pmap."""
    def f(map2):
      @partial(jax.pmap, axis_name='i')
      @partial(map2)
      def f(x):
        return jax.lax.all_gather(x, 'i')
      return f

    if jax.device_count() < 4:
      raise SkipTest("test requires at least four devices")
    x = jnp.ones((2, 2, 64, 64))
    self.assertAllClose(f(jax.pmap)(x), f(jax.vmap)(x))
class PmapWithDevicesTest(jtu.JaxTestCase):
def testAllDevices(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i',
devices=jax.devices())
shape = (jax.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
expected = x - np.sum(x, 0)
ans = f(x)
self.assertAllClose(ans, expected)
  def testOneDevice(self):
    """pmap restricted to a single explicit device computes correctly on it."""
    if jax.device_count() == 1:
      raise SkipTest("this test requires multiple devices")

    d0 = jax.devices()[0]
    d1 = jax.devices()[1]
    f = lambda x: jnp.dot(x, x.T)
    # Same computation pinned to each of two different devices.
    f0 = pmap(f, devices=[d0])
    f1 = pmap(f, devices=[d1])
    x = np.random.rand(1, 1000, 1000)
    r0 = f0(x)
    r1 = f1(x)
    expected = np.expand_dims(np.dot(x.squeeze(), x.squeeze().T), 0)
    self.assertAllClose(r0, expected, atol=1e-6, rtol=1e-3)
    self.assertAllClose(r1, expected, atol=1e-6, rtol=1e-3)
def testNoDevicesError(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i', devices=[])
shape = (jax.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
with self.assertRaisesRegex(
ValueError, "'devices' argument to pmap must be non-empty, or None."):
f(x)
  def testBadAxisSizeError(self):
    """Leading axis size must equal the number of devices passed to pmap."""
    if jax.device_count() == 1:
      raise SkipTest("this test requires multiple devices")

    f = pmap(lambda x: lax.psum(x, 'i'), axis_name='i',
             devices=jax.devices())
    # Too few elements along the mapped axis.
    with self.assertRaisesRegex(
        ValueError, r"Leading axis size of input to pmapped function must "
        r"equal the number of local devices passed to pmap. Got axis_size=1, "
        r"num_local_devices=\d."):
      f(jnp.ones(1))

    # Too many elements along the mapped axis.
    with self.assertRaisesRegex(
        ValueError, r"Leading axis size of input to pmapped function must "
        r"equal the number of local devices passed to pmap. Got axis_size=\d, "
        r"num_local_devices=\d."):
      f(jnp.ones(jax.device_count() + 1))
  def testBadAxisSizeErrorNested(self):
    """Nested pmaps report the total device requirement in their error."""
    # Outer pmap restricted to one device, but the inner pmap needs 4 more.
    f = pmap(pmap(lambda x: lax.psum(x, ('i', 'j')),
                  axis_name='j'),
             axis_name='i',
             devices=[jax.local_devices()[0]])
    with self.assertRaisesRegex(
        ValueError,
        r"pmapped function requires 4 local devices to run due to nested "
        r"pmapped or other parallel functions, but only 1 are available."):
      f(jnp.ones((1, 4)))
def testNestedPmaps(self):
if jax.device_count() % 2 != 0:
raise SkipTest
# Devices specified in outer pmap are OK
@partial(pmap, axis_name='i', devices=jax.devices())
def foo(x):
@partial(pmap, axis_name='j')
def bar(y):
return lax.psum(y, 'j')
return bar(x)
x = jnp.ones((jax.device_count() // 2, 2))
ans = foo(x)
expected = x * 2
self.assertAllClose(ans, expected)
  def testNestedPmapsError(self):
    """Specifying devices on an inner (nested) pmap raises an error."""
    # Devices specified in inner pmap not OK
    @partial(pmap, axis_name='i')
    def foo(x):
      @partial(pmap, axis_name='j', devices=jax.devices())
      def bar(y):
        return lax.psum(y, 'j')
      return bar(x)

    with self.assertRaisesRegex(
        ValueError,
        "Nested pmap with explicit devices argument."):
      foo(jnp.ones((jax.device_count(), 1)))
  def testJitInPmap(self):
    """jit inside a pmap with explicit devices composes correctly with psum."""
    @partial(pmap, axis_name='i', devices=jax.devices())
    def foo(x):
      @jit
      def bar(y):
        return y + 1
      return lax.psum(bar(x), 'i')

    ndevices = jax.device_count()
    ans = foo(jnp.ones((ndevices, 1)))
    # Each device computes 1 + 1 = 2; psum over ndevices gives 2 * ndevices.
    expected = np.ones((ndevices, 1), dtype=jnp.float_) * ndevices * 2
    self.assertAllClose(ans, expected)
  @ignore_jit_of_pmap_warning()
  def testPmapInJit(self):
    """pmap nested under jit still performs the cross-device psum."""
    @jit
    def foo(x):
      @partial(pmap, axis_name='i', devices=jax.devices())
      def bar(y):
        return lax.psum(y, 'i')
      return bar(x)
    ndevices = jax.device_count()
    ans = foo(jnp.ones((ndevices, 1)))
    # psum of ones over ndevices participants yields ndevices everywhere.
    expected = np.ones((ndevices, 1), dtype=jnp.float_) * ndevices
    self.assertAllClose(ans, expected)
  def testGradBasic(self):
    """grad through a pmapped sin matches grad of the unmapped version."""
    @partial(pmap, axis_name='i', devices=jax.devices())
    def f(x):
      return jnp.sin(x)
    shape = (jax.device_count(), 4)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    ans = grad(lambda x: jnp.sum(jnp.sin(x)))(x)
    expected = grad(lambda x: jnp.sum(f(x)))(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testPmapStaticArgnums(self):
    """static_broadcasted_argnums passes a non-array (callable) arg through."""
    @partial(pmap, axis_name='i', static_broadcasted_argnums=1)
    def f(x, y):
      return jnp.sin(x + y())
    shape = (jax.device_count(), 4)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    # y is a callable, not an array: it must be treated as static.
    y = lambda: 3.
    ans = f(x, y)
    expected = np.sin(x + 3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def testPmapInAxesBasic(self):
    """in_axes maps over non-leading axes of the inputs."""
    @partial(pmap, in_axes=(1, 2))
    def f(x, y):
      return jnp.sin(x + y)
    xshape = (2, jax.device_count(), 4)
    x = np.arange(prod(xshape)).reshape(xshape)
    yshape = (2, 4, jax.device_count())
    y = np.arange(prod(yshape)).reshape(yshape)
    # Reference result: move the mapped axes to the front by hand.
    self.assertAllClose(f(x, y),
                        jnp.sin(x.transpose((1, 0, 2)) + y.transpose((2, 0, 1))))
  def testPmapInAxesGrad(self):
    """Gradients through pmap with mixed in_axes match the vmap reference."""
    def f(x, y, z):
      return jnp.sin(x + y + z)
    # Same mapping expressed as pmap and as vmap; their grads must agree.
    fp = pmap(f, in_axes=(1, 2, None))
    fv = vmap(f, in_axes=(1, 2, None))
    xshape = (5, jax.device_count(), 7)
    x = np.arange(prod(xshape), dtype=np.float32).reshape(xshape)
    yshape = (5, 7, jax.device_count())
    y = np.arange(prod(yshape), dtype=np.float32).reshape(yshape)
    zshape = (5, 7)
    z = np.arange(prod(zshape), dtype=np.float32).reshape(zshape)
    # Gradients keep the original (unmapped) argument shapes.
    dx, dy, dz = jax.grad(lambda args: fp(*args).sum())((x, y, z))
    assert dx.shape == xshape
    assert dy.shape == yshape
    assert dz.shape == zshape
    self.assertAllClose(jax.grad(lambda args: fp(*args).sum())((x, y, z)),
                        jax.grad(lambda args: fv(*args).sum())((x, y, z)))
  def testPmapOutAxesBasic(self):
    """out_axes places the mapped axis at a non-leading output position."""
    @partial(pmap, in_axes=(1, None), out_axes=(2, None))
    def f(x, y):
      return jnp.sin(x + y), y * 2
    xshape = (2, jax.device_count(), 4)
    x = np.arange(prod(xshape)).reshape(xshape)
    yshape = (2, 4)
    y = np.arange(prod(yshape)).reshape(yshape)
    # First output has the mapped axis moved to position 2; second output
    # is unmapped (out_axes=None).
    self.assertAllClose(f(x, y),
                        (jnp.sin(x.transpose((1, 0, 2)) + y).transpose((1, 2, 0)), y * 2))
  def testPmapDictOutAxes(self):
    """out_axes given as a dict pytree is honored (regression for #6410)."""
    # see issue #6410
    @partial(pmap, out_axes={'a': 0})
    def f(x):
      return {'a': x}
    device_count = jax.device_count()
    x = jnp.arange(device_count)
    tree_util.tree_multimap(self.assertAllClose, f(x), {'a': x})
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": f"_{in_axes}_{out_axes}",
       "in_axes": in_axes, "out_axes": out_axes}
      for in_axes in all_bdims((3, 4), (3, 1), (1, 4), pmap=True)
      for out_axes in out_bdims((3, 4), True)
  ))
  def testPmapAllAxesGrad(self, in_axes, out_axes):
    """check_grads passes for every in_axes/out_axes placement combination."""
    def f(x, y, z):
      return jnp.sin(x + y) * z
    pmapped_size = jax.device_count()
    mapped_shapes = [(3, 4), (3, 1), (1, 4)]
    # Insert the mapped (device) axis into each shape per its in_axes entry.
    arg_shapes = map(partial(add_bdim, pmapped_size), in_axes, mapped_shapes)
    rng = jtu.rand_default(self.rng())
    args = [rng(shape, jnp.float64) for shape in arg_shapes]
    jtu.check_grads(pmap(f, in_axes=in_axes, out_axes=out_axes), args,
                    order=2, atol=2e-2, rtol=2e-2, eps=1e-3)
  def testPmapPostProcess(self):
    """grad of a closure over a pmapped inner map agrees with the vmap analogue."""
    def mk_case(map_fun):
      def f(x, y):
        # NOTE: Map doesn't have any arguments we differentiate wrt
        @partial(map_fun, in_axes=1, out_axes=2)
        def h(y):
          return jnp.sin(x + y)
        return h(y).sum()
      return f
    xshape = (5, 7)
    x = np.arange(prod(xshape), dtype=np.float32).reshape(xshape)
    yshape = (5, jax.device_count(), 7)
    y = np.arange(prod(yshape), dtype=np.float32).reshape(yshape)
    # pmap and vmap versions of the same closure must produce the same grad.
    self.assertAllClose(jax.grad(mk_case(pmap))(x, y),
                        jax.grad(mk_case(vmap))(x, y))
class ShardedDeviceArrayTest(jtu.JaxTestCase):
  """Tests for ShardedDeviceArray construction, indexing, and deletion."""
  def testThreadsafeIndexing(self):
    """Concurrent indexing of a ShardedDeviceArray returns correct shards."""
    # NOTE(skye): I picked these values to be big enough to cause interesting
    # execution overlap, but small enough to not use too much memory. YMMV.
    shape = (8, 8000, 1000)
    if jax.device_count() < shape[0]:
      raise SkipTest(f"requires {shape[0]} devices")
    x = jnp.arange(prod(shape)).reshape(shape)
    sharded_x = pmap(lambda x: x)(x)
    num_threads = 10
    futures = []
    expected = []
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
      for i in range(num_threads):
        idx = i % shape[0]
        # Mix together different kinds of indices
        if i % 2 == 0:
          idx = slice(idx, idx + 1)
        # Use the "kwarg trick" to work around late-binding closures. See
        # https://docs.python-guide.org/writing/gotchas/#late-binding-closures.
        futures.append(executor.submit(
            lambda idx=idx: [sharded_x[idx] for _ in range(10)][0]))
        expected.append(x[idx])
      actual = [f.result() for f in futures]
    self.assertAllClose(actual, expected, check_dtypes=False)
  def testNoCopyIndexing1D(self):
    """Integer indexing yields per-shard arrays without a host copy."""
    shape = (8, 4)
    if jax.device_count() < shape[0]:
      raise SkipTest(f"requires {shape[0]} devices")
    x = jnp.arange(prod(shape)).reshape(shape)
    sharded_x = pmap(lambda x: x)(x)
    # _npy_value stays None as long as no host copy was materialized.
    self.assertIsNone(sharded_x._npy_value)
    for i in range(8):
      self.assertIsInstance(sharded_x[i], device_array.DeviceArray)
    self.assertIsNone(sharded_x._npy_value)
  def test_device_put_sharded_array(self):
    """device_put_sharded places one array shard on each local device."""
    devices = jax.local_devices()
    n_devices = len(devices)
    x = [np.arange(i, i + 4) for i in range(n_devices)]
    y = jax.device_put_sharded(x, devices)
    self.assertIsInstance(y, pxla.ShardedDeviceArray)
    self.assertEqual(len(y.device_buffers), len(devices))
    self.assertTrue(all(b.device() == d for b, d in zip(y.device_buffers, devices)))
    self.assertArraysEqual(y, jnp.stack(x))
  def test_device_put_sharded_pytree(self):
    """device_put_sharded shards each leaf of a pytree input."""
    devices = jax.local_devices()
    n_devices = len(devices)
    x = [(i, np.arange(i, i + 4)) for i in range(n_devices)]
    y1, y2 = jax.device_put_sharded(x, devices)
    self.assertIsInstance(y1, pxla.ShardedDeviceArray)
    self.assertArraysEqual(y1, jnp.array([a for a, _ in x]))
    self.assertTrue(all(b.device() == d for b, d in zip(y1.device_buffers, devices)))
    self.assertIsInstance(y2, pxla.ShardedDeviceArray)
    self.assertArraysEqual(y2, jnp.vstack([b for _, b in x]))
    self.assertTrue(all(b.device() == d for b, d in zip(y2.device_buffers, devices)))
  def test_device_put_replicated_array(self):
    """device_put_replicated copies the same array to every device."""
    devices = jax.local_devices()
    x = np.arange(1, 5)
    y = jax.device_put_replicated(x, devices)
    self.assertIsInstance(y, pxla.ShardedDeviceArray)
    self.assertEqual(len(y.device_buffers), len(devices))
    self.assertTrue(all(b.device() == d for b, d in zip(y.device_buffers, devices)))
    self.assertArraysEqual(y, np.stack([x for _ in devices]))
  def test_device_put_replicated_pytree(self):
    """device_put_replicated replicates each leaf of a pytree input."""
    devices = jax.local_devices()
    xs = {'a': np.arange(1, 5), 'b': np.arange(3)}
    ys = jax.device_put_replicated(xs, devices)
    self.assertIsInstance(ys, dict)
    y1, y2 = ys['a'], ys['b']
    self.assertIsInstance(y1, pxla.ShardedDeviceArray)
    self.assertEqual(len(y1.device_buffers), len(devices))
    self.assertTrue(all(b.device() == d for b, d in zip(y1.device_buffers, devices)))
    self.assertArraysEqual(y1, np.stack([xs['a'] for _ in devices]))
    self.assertIsInstance(y2, pxla.ShardedDeviceArray)
    self.assertEqual(len(y2.device_buffers), len(devices))
    self.assertTrue(all(b.device() == d for b, d in zip(y2.device_buffers, devices)))
    self.assertArraysEqual(y2, np.stack([xs['b'] for _ in devices]))
  def test_repr(self):
    """repr of a replicated value names the ShardedDeviceArray type."""
    x = jax.device_put_replicated(1, jax.devices())
    self.assertStartsWith(repr(x), 'ShardedDeviceArray')
  def test_delete_is_idempotent(self):
    """delete() may be called twice; any access afterwards raises."""
    x = jax.device_put_replicated(1, jax.devices())
    x.delete()
    x.delete()
    with self.assertRaisesRegex(ValueError,
                                'ShardedDeviceArray has been deleted.'):
      _ = x[0]
class SpecToIndicesTest(jtu.JaxTestCase):
  """Tests that pxla.spec_to_indices turns a ShardingSpec into per-device
  index tuples, in device (mesh) order."""
  def testShardsPerAxis(self):
    """Chunking both axes yields a row-major product of slices."""
    shape = (4, 8)
    spec = pxla.ShardingSpec(sharding=map(pxla.Chunked, ([2], [2])),
                             mesh_mapping=map(pxla.ShardedAxis, (0, 1)))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(0,2), slice(0,4)),
                      (slice(0,2), slice(4,8)),
                      (slice(2,4), slice(0,4)),
                      (slice(2,4), slice(4,8))))
  def testShardedAxisPermutation(self):
    """Swapping ShardedAxis order in the mesh permutes the index order."""
    shape = (4, 8)
    spec = pxla.ShardingSpec(sharding=map(pxla.Chunked, ([2], [2])),
                             mesh_mapping=map(pxla.ShardedAxis, (1, 0)))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(0,2), slice(0,4)),
                      (slice(2,4), slice(0,4)),
                      (slice(0,2), slice(4,8)),
                      (slice(2,4), slice(4,8))))
  def testShardedAxisPermutationAndReplication(self):
    """A leading Replicated mesh dim repeats the permuted index tuple."""
    shape = (4, 8)
    spec = pxla.ShardingSpec(sharding=map(pxla.Chunked, ([2], [2])),
                             mesh_mapping=(pxla.Replicated(2),
                                           pxla.ShardedAxis(1),
                                           pxla.ShardedAxis(0)))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(0,2), slice(0,4)),
                      (slice(2,4), slice(0,4)),
                      (slice(0,2), slice(4,8)),
                      (slice(2,4), slice(4,8))) * 2)
  def testUnshardedAxis(self):
    """NoSharding on an axis produces slice(None) for that axis."""
    shape = (4, 8)
    spec = pxla.ShardingSpec(sharding=(pxla.Chunked([2]), pxla.NoSharding()),
                             mesh_mapping=(pxla.ShardedAxis(0),))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(0,2), slice(None)),
                      (slice(2,4), slice(None))))
  def testNoSharding(self):
    """A fully unsharded spec yields a single all-slice index."""
    shape = (4, 8)
    spec = pxla.ShardingSpec(sharding=(pxla.NoSharding(), pxla.NoSharding()),
                             mesh_mapping=())
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(None), slice(None)),))
  def testUnmaterializedAxis(self):
    """Unstacked axes index with integers rather than slices."""
    shape = (4, 8)
    spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(4), pxla.NoSharding()),
                             mesh_mapping=(pxla.ShardedAxis(0),))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((0, slice(None)),
                      (1, slice(None)),
                      (2, slice(None)),
                      (3, slice(None))))
    shape = (2, 2)
    spec = pxla.ShardingSpec(sharding=(pxla.NoSharding(), pxla.Unstacked(2)),
                             mesh_mapping=(pxla.ShardedAxis(0),))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(None), 0),
                      (slice(None), 1)))
  def testReplicationAfterUnsharded(self):
    """A trailing Replicated mesh dim repeats each index consecutively."""
    shape = (2, 8)
    spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.NoSharding()),
                             mesh_mapping=(pxla.ShardedAxis(0), pxla.Replicated(3)))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     tuple([(0, slice(None))] * 3 + [(1, slice(None))] * 3))
  def testReplicationPosition2(self):
    """Replication in the last mesh position repeats innermost."""
    shape = (2, 8)
    spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.Chunked([2])),
                             mesh_mapping=(pxla.ShardedAxis(0), pxla.ShardedAxis(1), pxla.Replicated(3)))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((0, slice(0, 4)), (0, slice(0, 4)), (0, slice(0, 4)),
                      (0, slice(4, 8)), (0, slice(4, 8)), (0, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(0, 4)), (1, slice(0, 4)),
                      (1, slice(4, 8)), (1, slice(4, 8)), (1, slice(4, 8))))
  def testReplicationPosition1(self):
    """Replication in the middle mesh position repeats whole inner blocks."""
    shape = (2, 8)
    spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.Chunked([2])),
                             mesh_mapping=(pxla.ShardedAxis(0), pxla.Replicated(3), pxla.ShardedAxis(1)))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((0, slice(0, 4)), (0, slice(4, 8)),
                      (0, slice(0, 4)), (0, slice(4, 8)),
                      (0, slice(0, 4)), (0, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(4, 8))))
  def testReplicationPosition0(self):
    """Replication in the first mesh position repeats the full sequence."""
    shape = (2, 8)
    spec = pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.NoSharding()),
                             mesh_mapping=(pxla.Replicated(3), pxla.ShardedAxis(0)))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     tuple([(0, slice(None)), (1, slice(None))] * 3))
  def testMultipleReplications(self):
    """Several Replicated mesh dims multiply the repetition count."""
    shape = (2, 7, 4)
    spec = pxla.ShardingSpec(
        sharding=(pxla.Unstacked(2), pxla.NoSharding(), pxla.Chunked([2])),
        mesh_mapping=(pxla.Replicated(3), pxla.Replicated(2),
                      pxla.ShardedAxis(0), pxla.Replicated(2),
                      pxla.ShardedAxis(1)))
    self.assertEqual(
        pxla.spec_to_indices(shape, spec),
        ((0, slice(None), slice(0, 2)), (0, slice(None), slice(2, 4)),
         (0, slice(None), slice(0, 2)), (0, slice(None), slice(2, 4)),
         (1, slice(None), slice(0, 2)), (1, slice(None), slice(2, 4)),
         (1, slice(None), slice(0, 2)), (1, slice(None), slice(2, 4))) * 3 * 2)
  def testReplicatedScalar(self):
    """A replicated scalar yields one empty index tuple per replica."""
    shape = ()
    spec = pxla.ShardingSpec(sharding=(),
                             mesh_mapping=(pxla.Replicated(3),))
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((), (), ()))
def _spec_str(spec):
return (f"({spec.sharding},"
f"{spec.mesh_mapping},)")
class ShardArgsTest(jtu.JaxTestCase):
  """Tests pxla.shard_args over input types and sharding specs."""
  # Class-level helpers used (at class-definition time) to build the
  # parameterized cases below; they are not test methods.
  def numpy_array(x):
    return x
  def device_array(x):
    return jax.device_put(x)
  # TODO(skye): add coverage for ShardedDeviceArrays
  @parameterized.named_parameters(
      {"testcase_name":
       f"_shape={shape}_spec={_spec_str(spec)}_arg={make_arg.__name__}"
       .replace(" ", ""),
       "shape": shape, "spec": spec, "make_arg": make_arg}
      for make_arg in [numpy_array, device_array]
      for shape, spec in [
          # pmap(in_axes=0)
          [(4, 8), pxla.ShardingSpec(sharding=(pxla.Unstacked(4), pxla.NoSharding()),
                                     mesh_mapping=(pxla.ShardedAxis(0),))],
          # pmap(in_axes=1)
          [(2, 2), pxla.ShardingSpec(sharding=(pxla.NoSharding(), pxla.Unstacked(2)),
                                     mesh_mapping=(pxla.ShardedAxis(0),))],
          # unsharded
          [(4, 8), pxla.ShardingSpec(sharding=(pxla.NoSharding(), pxla.NoSharding()),
                                     mesh_mapping=())],
          # partitioned, 1 axis
          [(4, 8), pxla.ShardingSpec(sharding=(pxla.Chunked([2]), pxla.NoSharding()),
                                     mesh_mapping=(pxla.ShardedAxis(0),))],
          # partitioned, 2 axes
          [(4, 8), pxla.ShardingSpec(sharding=(pxla.Chunked([2]), pxla.Chunked([2])),
                                     mesh_mapping=map(pxla.ShardedAxis, (0, 1)))],
          # partitioned, 2 axes, permuted
          [(4, 8), pxla.ShardingSpec(sharding=(pxla.Chunked([2]), pxla.Chunked([2])),
                                     mesh_mapping=map(pxla.ShardedAxis, (1, 0)))],
          # partitioned + sharding
          [(2, 8), pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.Chunked([2])),
                                     mesh_mapping=map(pxla.ShardedAxis, (0, 1)))],
          # replication + sharding
          [(2, 8), pxla.ShardingSpec(sharding=(pxla.Unstacked(2), pxla.NoSharding()),
                                     mesh_mapping=(pxla.ShardedAxis(0), pxla.Replicated(3)))],
          # replication, no sharding
          [(2, 8), pxla.ShardingSpec(sharding=(pxla.NoSharding(), pxla.NoSharding()),
                                     mesh_mapping=(pxla.Replicated(3),))],
          # multiple replicated axes
          [(1, 8), pxla.ShardingSpec(sharding=(pxla.Unstacked(1), pxla.Chunked([2])),
                                     mesh_mapping=(pxla.Replicated(2), pxla.ShardedAxis(0),
                                                   pxla.Replicated(2), pxla.ShardedAxis(1)))],
          # replicated scalar
          [(), pxla.ShardingSpec(sharding=(),
                                 mesh_mapping=(pxla.Replicated(2), pxla.Replicated(3)))],
      ])
  def testShardArgs(self, shape, spec, make_arg):
    """Each produced buffer holds exactly the slice its index selects."""
    indices = pxla.spec_to_indices(shape, spec)
    nshards = len(indices)
    if jax.device_count() < nshards:
      raise SkipTest
    x = np.arange(prod(shape)).reshape(shape)
    arg = make_arg(x)
    bufs = pxla.shard_args(jax.devices()[:nshards],
                           [indices], [arg])
    self.assertEqual(len(bufs), 1)
    self.assertEqual(len(bufs[0]), nshards)
    for buf, idx in zip(bufs[0], indices):
      self.assertAllClose(buf.to_py(), x[idx], check_dtypes=False)
# Run the suite with JAX's custom test loader when executed directly.
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
| 37.030478
| 105
| 0.626048
|
4a062c0e595e2e8af5db3523a46893c05fd2e992
| 7,365
|
py
|
Python
|
minfraud/komand_minfraud/actions/device_lookup/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
minfraud/komand_minfraud/actions/device_lookup/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
minfraud/komand_minfraud/actions/device_lookup/action.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
import komand
from .schema import DeviceLookupInput, DeviceLookupOutput
# Custom imports below
import minfraud
class DeviceLookup(komand.Action):
    """Komand/InsightConnect action: query MaxMind minFraud Insights for
    device and IP reputation info about an address."""
    def __init__(self):
        super(self.__class__, self).__init__(
            name='device_lookup',
            description='Query device info',
            input=DeviceLookupInput(),
            output=DeviceLookupOutput())
    # NOTE(review): mutable default `params={}` is shared across calls; the
    # komand.Action base appears to define this signature, so confirm before
    # changing it.
    def run(self, params={}):
        """Run the lookup: call the minFraud Insights API and reshape the
        response into the plugin's output schema.

        params: expects 'address' (required), optional 'user_agent' and
            'accept_language'.
        Returns a dict with 'risk_score', 'ip_result', and 'device_result'.
        Raises the underlying minfraud exception on API failure.
        """
        address = params.get('address')
        user_agent = params.get('user_agent')
        accept_language = params.get('accept_language')
        user = self.connection.user
        license = self.connection.license
        # Set client
        client = minfraud.Client(user, license)
        # Define request
        device = {'ip_address': address}
        if user_agent:
            device['user_agent'] = user_agent
        if accept_language:
            device['accept_language'] = accept_language
        try:
            # Generate request
            insights = client.insights({'device': device})
        except minfraud.AuthenticationError:
            self.logger.error('Authentication failed')
            raise
        except minfraud.InsufficientFundsError:
            self.logger.error('Insufficient funds')
            raise
        except minfraud.InvalidRequestError:
            self.logger.error('Invalid request')
            raise
        except minfraud.HttpError:
            self.logger.error('Unexpected HTTP error occurred')
            raise
        except minfraud.MinFraudError:
            self.logger.error('Unexpected content received from server')
            raise
        # IP portion of response
        ip = insights.ip_address
        # Overall risk score
        risk_score = str(insights.risk_score)
        # Risk score for IP
        risk = str(ip.risk)
        # City info
        confidence = ip.city.confidence
        geoname_id = ip.city.geoname_id
        name = str(ip.city.name)
        city = {'confidence': confidence,
                'geoname_id': geoname_id,
                'name': name
                }
        # Continent info
        code = str(ip.continent.code)
        geoname_id = ip.continent.geoname_id
        name = str(ip.continent.name)
        continent = {'code': code,
                     'geoname_id': geoname_id,
                     'name': name
                     }
        # Country info
        confidence = ip.country.confidence
        geoname_id = ip.country.geoname_id
        name = str(ip.country.name)
        is_high_risk = ip.country.is_high_risk
        iso_code = str(ip.country.iso_code)
        country = {'confidence': confidence,
                   'geoname_id': geoname_id,
                   'name': name,
                   'is_high_risk': is_high_risk,
                   'iso_code': iso_code
                   }
        # Location info
        accuracy_radius = ip.location.accuracy_radius
        average_income = ip.location.average_income
        population_density = ip.location.population_density
        latitude = str(ip.location.latitude)
        local_time = str(ip.location.local_time)
        longitude = str(ip.location.longitude)
        metro_code = ip.location.metro_code
        time_zone = str(ip.location.time_zone)
        # NOTE(review): the key below is misspelled ('avergae_income'); it
        # likely must match the action's output schema, so verify the schema
        # before renaming it to 'average_income'.
        location = {'accuracy_radius': accuracy_radius,
                    'avergae_income': average_income,
                    'population_density': population_density,
                    'latitude': latitude,
                    'local_time': local_time,
                    'longitude': longitude,
                    'metro_code': metro_code,
                    'time_zone': time_zone
                    }
        # Postal info
        # NOTE(review): int() will raise ValueError for non-numeric postal
        # codes (e.g. UK 'SW1A') and TypeError when the code is absent —
        # confirm upstream guarantees a numeric code.
        code = int(ip.postal.code)
        confidence = ip.postal.confidence
        postal = {'code': code,
                  'confidence': confidence
                  }
        # Registered country info
        geoname_id = ip.registered_country.geoname_id
        iso_code = str(ip.registered_country.iso_code)
        name = str(ip.registered_country.name)
        registered_country = {'geoname_id': geoname_id,
                              'iso_code': iso_code,
                              'name': name
                              }
        # Represented country info
        geoname_id = ip.represented_country.geoname_id
        iso_code = str(ip.represented_country.iso_code)
        name = str(ip.represented_country.name)
        _type = str(ip.represented_country.type)
        represented_country = {'geoname_id': geoname_id,
                               'iso_code': iso_code,
                               'name': name,
                               '_type': _type
                               }
        # Subdivisions info (most specific subdivision only)
        iso_code = str(ip.subdivisions.most_specific.iso_code)
        confidence = ip.subdivisions.most_specific.confidence
        geoname_id = ip.subdivisions.most_specific.geoname_id
        name = str(ip.subdivisions.most_specific.name)
        subdivisions = {'confidence': confidence,
                        'geoname_id': geoname_id,
                        'iso_code': iso_code,
                        'name': name
                        }
        # Traits info
        autonomous_system_number = ip.traits.autonomous_system_number
        autonomous_system_organization = str(ip.traits.autonomous_system_organization)
        domain = str(ip.traits.domain)
        is_anonymous_proxy = ip.traits.is_anonymous_proxy
        is_satellite_provider = ip.traits.is_satellite_provider
        isp = str(ip.traits.isp)
        ip_address = str(ip.traits.ip_address)
        organization = str(ip.traits.organization)
        user_type = str(ip.traits.user_type)
        traits = {'autonomous_system_number': autonomous_system_number,
                  'autonomous_system_organization': autonomous_system_organization,
                  'domain': domain,
                  'is_anonymous_proxy': is_anonymous_proxy,
                  'is_satellite_provider': is_satellite_provider,
                  'isp': isp,
                  'ip_address': ip_address,
                  'organization': organization,
                  'user_type': user_type
                  }
        # Device info
        confidence = insights.device.confidence
        id = insights.device.id
        last_seen = insights.device.last_seen
        device_result = {'confidence': confidence,
                         'id': id,
                         'last_seen': last_seen
                         }
        # Clean device dict
        device_result = komand.helper.clean_dict(device_result)
        # Set result dict
        ip_result = {'risk': risk,
                     'city': city,
                     'continent': continent,
                     'country': country,
                     'location': location,
                     'postal': postal,
                     'registered_country': registered_country,
                     'represented_country': represented_country,
                     'subdivisions': subdivisions,
                     'traits': traits
                     }
        # Clean dict ('risk' is a plain string, not a nested dict)
        for k, v in ip_result.items():
            if k != "risk":
                ip_result[k] = komand.helper.clean_dict(ip_result[k])
        return {'risk_score': risk_score,
                'ip_result': ip_result,
                'device_result': device_result
                }
    def test(self):
        """Connection test: issue a lookup for 8.8.8.8 and verify the
        credentials work; returns an empty dict on success."""
        user = self.connection.user
        license = self.connection.license
        # Set client
        client = minfraud.Client(user, license)
        # Define request
        request = {'device': {'ip_address': '8.8.8.8'}}
        try:
            # Generate request
            insights = client.insights(request)
        except minfraud.AuthenticationError:
            self.logger.error('Authentication failed')
            raise
        except minfraud.InsufficientFundsError:
            self.logger.error('Insufficient funds')
            raise
        return {}
| 32.879464
| 86
| 0.60353
|
4a062e5dec85f0f16710070616a51c4b5021ea62
| 1,367
|
py
|
Python
|
Python3/1137.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 854
|
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
Python3/1137.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 29
|
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
Python3/1137.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 347
|
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
    def tribonacci(self, n: int) -> int:
        """Return the n-th Tribonacci number (T0=0, T1=1, T2=1,
        T(k)=T(k-1)+T(k-2)+T(k-3)) in O(n) time and O(1) space."""
        if n == 0:
            return 0
        if n <= 2:
            return 1
        prev3, prev2, prev1 = 0, 1, 1
        # Slide the three-term window forward n-2 times.
        for _ in range(n - 2):
            prev3, prev2, prev1 = prev2, prev1, prev3 + prev2 + prev1
        return prev1
__________________________________________________________________________________________________
sample 28 ms submission
class Solution:
    def tribonacci(self, n: int) -> int:
        """Return the n-th Tribonacci number using a bottom-up table."""
        table = [0, 1, 1]
        # Extend the table until index n exists.
        while len(table) <= n:
            table.append(sum(table[-3:]))
        return table[n]
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
    def tribonacci(self, n: int) -> int:
        """Return the n-th Tribonacci number with three rolling variables."""
        base = (0, 1, 1)
        if n < 3:
            return base[n]
        a, b, c = base
        for _ in range(n - 2):
            a, b, c = b, c, a + b + c
        return c
| 28.479167
| 98
| 0.554499
|
4a062edaae205290737a7378ec55d8f5d070aad8
| 6,399
|
py
|
Python
|
homeassistant/components/google_wifi/sensor.py
|
kauzu/core
|
6eadc0c3033473381cb70babf9f7c813e183d2df
|
[
"Apache-2.0"
] | 2
|
2021-01-29T02:52:01.000Z
|
2021-05-15T04:23:18.000Z
|
homeassistant/components/google_wifi/sensor.py
|
kauzu/core
|
6eadc0c3033473381cb70babf9f7c813e183d2df
|
[
"Apache-2.0"
] | 44
|
2021-03-17T07:49:17.000Z
|
2022-03-31T06:08:18.000Z
|
homeassistant/components/google_wifi/sensor.py
|
kauzu/core
|
6eadc0c3033473381cb70babf9f7c813e183d2df
|
[
"Apache-2.0"
] | 7
|
2021-03-20T12:34:01.000Z
|
2021-12-02T10:13:52.000Z
|
"""Support for retrieving status info from Google Wifi/OnHub routers."""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
STATE_UNKNOWN,
TIME_DAYS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle, dt
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_VERSION = "current_version"
ATTR_LAST_RESTART = "last_restart"
ATTR_LOCAL_IP = "local_ip"
ATTR_NEW_VERSION = "new_version"
ATTR_STATUS = "status"
ATTR_UPTIME = "uptime"
DEFAULT_HOST = "testwifi.here"
DEFAULT_NAME = "google_wifi"
ENDPOINT = "/api/v1/status"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
MONITORED_CONDITIONS = {
ATTR_CURRENT_VERSION: [
["software", "softwareVersion"],
None,
"mdi:checkbox-marked-circle-outline",
],
ATTR_NEW_VERSION: [["software", "updateNewVersion"], None, "mdi:update"],
ATTR_UPTIME: [["system", "uptime"], TIME_DAYS, "mdi:timelapse"],
ATTR_LAST_RESTART: [["system", "uptime"], None, "mdi:restart"],
ATTR_LOCAL_IP: [["wan", "localIpAddress"], None, "mdi:access-point-network"],
ATTR_STATUS: [["wan", "online"], None, "mdi:google"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(MONITORED_CONDITIONS)
): vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Google Wifi sensor."""
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    conditions = config.get(CONF_MONITORED_CONDITIONS)
    # One shared API object polls the router; each monitored condition
    # becomes its own sensor entity reading from that shared poller.
    api = GoogleWifiAPI(host, conditions)
    dev = []
    for condition in conditions:
        dev.append(GoogleWifiSensor(api, name, condition))
    add_entities(dev, True)
class GoogleWifiSensor(SensorEntity):
    """Representation of a Google Wifi sensor."""
    def __init__(self, api, name, variable):
        """Initialize a Google Wifi sensor.

        api: shared GoogleWifiAPI poller.
        name: base entity name (the variable name is appended to it).
        variable: key into MONITORED_CONDITIONS selecting what to report.
        """
        self._api = api
        self._name = name
        self._state = None
        # MONITORED_CONDITIONS values are [json path, unit, icon].
        variable_info = MONITORED_CONDITIONS[variable]
        self._var_name = variable
        self._var_units = variable_info[1]
        self._var_icon = variable_info[2]
    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name}_{self._var_name}"
    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._var_icon
    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._var_units
    @property
    def available(self):
        """Return availability of Google Wifi API."""
        return self._api.available
    @property
    def native_value(self):
        """Return the state of the device."""
        return self._state
    def update(self):
        """Get the latest data from the Google Wifi API."""
        self._api.update()
        # When the API is unavailable, report no state rather than stale data.
        if self.available:
            self._state = self._api.data[self._var_name]
        else:
            self._state = None
class GoogleWifiAPI:
    """Get the latest data and update the states."""
    def __init__(self, host, conditions):
        """Initialize the data object.

        host: router hostname; polled at http://<host>/api/v1/status.
        conditions: iterable of MONITORED_CONDITIONS keys to extract.
        """
        uri = "http://"
        resource = f"{uri}{host}{ENDPOINT}"
        # Prepare the GET request once; it is re-sent on every update().
        self._request = requests.Request("GET", resource).prepare()
        self.raw_data = None
        self.conditions = conditions
        # Extracted sensor values, keyed by condition name.
        self.data = {
            ATTR_CURRENT_VERSION: STATE_UNKNOWN,
            ATTR_NEW_VERSION: STATE_UNKNOWN,
            ATTR_UPTIME: STATE_UNKNOWN,
            ATTR_LAST_RESTART: STATE_UNKNOWN,
            ATTR_LOCAL_IP: STATE_UNKNOWN,
            ATTR_STATUS: STATE_UNKNOWN,
        }
        self.available = True
        self.update()
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from the router."""
        try:
            with requests.Session() as sess:
                response = sess.send(self._request, timeout=10)
            # ValueError covers invalid JSON from response.json().
            self.raw_data = response.json()
            self.data_format()
            self.available = True
        except (ValueError, requests.exceptions.ConnectionError):
            _LOGGER.warning("Unable to fetch data from Google Wifi")
            self.available = False
            self.raw_data = None
    def data_format(self):
        """Format raw data into easily accessible dict."""
        for attr_key in self.conditions:
            value = MONITORED_CONDITIONS[attr_key]
            try:
                primary_key = value[0][0]
                sensor_key = value[0][1]
                if primary_key in self.raw_data:
                    sensor_value = self.raw_data[primary_key][sensor_key]
                    # Format sensor for better readability
                    if attr_key == ATTR_NEW_VERSION and sensor_value == "0.0.0.0":
                        sensor_value = "Latest"
                    elif attr_key == ATTR_UPTIME:
                        # Router reports uptime in seconds; convert to days.
                        sensor_value = round(sensor_value / (3600 * 24), 2)
                    elif attr_key == ATTR_LAST_RESTART:
                        last_restart = dt.now() - timedelta(seconds=sensor_value)
                        sensor_value = last_restart.strftime("%Y-%m-%d %H:%M:%S")
                    elif attr_key == ATTR_STATUS:
                        if sensor_value:
                            sensor_value = "Online"
                        else:
                            sensor_value = "Offline"
                    elif (
                        attr_key == ATTR_LOCAL_IP and not self.raw_data["wan"]["online"]
                    ):
                        # A local IP is meaningless while the WAN is down.
                        sensor_value = STATE_UNKNOWN
                    self.data[attr_key] = sensor_value
            except KeyError:
                _LOGGER.error(
                    "Router does not support %s field. "
                    "Please remove %s from monitored_conditions",
                    sensor_key,
                    attr_key,
                )
                self.data[attr_key] = STATE_UNKNOWN
| 33.328125
| 88
| 0.602282
|
4a062ee80276a11c9cd04b701d9e1e8fb6dd5c24
| 5,070
|
py
|
Python
|
questions.py
|
Fatiepie/Harriet.ai
|
b41ad15092cf247f0442dd6562075939b78381a9
|
[
"MIT"
] | null | null | null |
questions.py
|
Fatiepie/Harriet.ai
|
b41ad15092cf247f0442dd6562075939b78381a9
|
[
"MIT"
] | null | null | null |
questions.py
|
Fatiepie/Harriet.ai
|
b41ad15092cf247f0442dd6562075939b78381a9
|
[
"MIT"
] | null | null | null |
import nltk
import sys
import os
import string
import math
FILE_MATCHES = 1
SENTENCE_MATCHES = 1
def main():
    """Answer a user query against a corpus: rank files by tf-idf, then
    rank sentences from the top files by idf and query-term density."""
    # Check command-line arguments
    if len(sys.argv) != 2:
        sys.exit("Usage: python questions.py corpus")
    # Calculate IDF values across files
    files = load_files(sys.argv[1])
    file_words = {
        filename: tokenize(files[filename])
        for filename in files
    }
    file_idfs = compute_idfs(file_words)
    # Prompt user for query
    query = set(tokenize(input("Query: ")))
    # Determine top file matches according to TF-IDF
    filenames = top_files(query, file_words, file_idfs, n=FILE_MATCHES)
    # Extract sentences from top files
    sentences = dict()
    for filename in filenames:
        for passage in files[filename].split("\n"):
            for sentence in nltk.sent_tokenize(passage):
                tokens = tokenize(sentence)
                # Skip sentences that tokenize to nothing (all stopwords).
                if tokens:
                    sentences[sentence] = tokens
    # Compute IDF values across sentences
    idfs = compute_idfs(sentences)
    # Determine top sentence matches
    matches = top_sentences(query, sentences, idfs, n=SENTENCE_MATCHES)
    for match in matches:
        print(match)
def load_files(directory):
    """
    Given a directory name, return a dictionary mapping the filename of each
    file inside that directory (searched recursively) to the file's contents
    as a string.

    Note: despite the module docstring's mention of `.txt`, all files found
    are loaded, matching the original behavior.
    """
    file_contents = dict()
    for root, _, files in os.walk(directory):
        for file in files:
            # Use a context manager so the handle is closed even if read()
            # raises (the previous version leaked open file handles).
            with open(os.path.join(root, file), "r", encoding="utf8") as f:
                file_contents[file] = f.read()
    return file_contents
def tokenize(document):
    """
    Given a document (represented as a string), return a list of all of the
    words in that document, in order.

    The document is lowercased before tokenizing; punctuation tokens and
    English stopwords are filtered out.
    """
    stop_words = nltk.corpus.stopwords.words("english")
    tokens = nltk.word_tokenize(document.lower())
    # Keep the original substring-style punctuation test ("ab" in
    # string.punctuation) so behavior is unchanged for multi-char tokens.
    return [
        tok for tok in tokens
        if tok not in string.punctuation and tok not in stop_words
    ]
def compute_idfs(documents):
    """
    Given a dictionary of `documents` that maps names of documents to a list
    of words, return a dictionary that maps words to their IDF values.

    IDF(word) = ln(total documents / documents containing word). Any word
    that appears in at least one of the documents is in the result.
    """
    total_num_documents = len(documents)
    # Pre-convert each word list to a set: membership checks drop from
    # O(len(document)) to O(1), so the overall cost falls from
    # O(vocabulary * total words) to roughly O(total words).
    document_sets = [set(words) for words in documents.values()]
    vocabulary = set().union(*document_sets)
    idfs = dict()
    for word in vocabulary:
        containing = sum(1 for doc in document_sets if word in doc)
        idfs[word] = math.log(total_num_documents / containing)
    return idfs
def top_files(query, files, idfs, n):
    """
    Given a `query` (a set of words), `files` (a dictionary mapping names of
    files to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the filenames of the `n` top
    files that match the query, ranked according to tf-idf.
    """
    def tf_idf(words):
        # Sum of term frequency * idf over every query word.
        return sum(words.count(word) * idfs[word] for word in query)
    # sorted() is stable, so files with equal scores keep insertion order,
    # matching the original items()-based ranking.
    ranked = sorted(files, key=lambda name: tf_idf(files[name]), reverse=True)
    return ranked[:n]
def top_sentences(query, sentences, idfs, n):
    """
    Given a `query` (a set of words), `sentences` (a dictionary mapping
    sentences to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the `n` top sentences that match
    the query, ranked according to idf. If there are ties, preference is
    given to sentences that have a higher query term density (the fraction
    of the sentence's words that appear in the query).
    """
    sentence_scores = dict()
    for sentence, words in sentences.items():
        matched = query.intersection(words)
        idf = sum(idfs[word] for word in matched)
        # Guard against empty sentences, which previously raised
        # ZeroDivisionError when computing the density.
        if words:
            density = sum(1 for word in words if word in matched) / len(words)
        else:
            density = 0.0
        sentence_scores[sentence] = (idf, density)

    # Tuples compare lexicographically, so sorting on (idf, density) in
    # reverse ranks by idf first and breaks ties by density, exactly as the
    # original two-key sort did.
    ranked_sentences = sorted(sentence_scores, key=sentence_scores.get, reverse=True)
    return ranked_sentences[:n]
# Standard script entry point: run the question-answering pipeline only when
# executed directly, not when imported as a module.
if __name__ == "__main__":
    main()
| 30.914634
| 85
| 0.658185
|
4a062f8097168202db7100b2cedc1adc39bc5752
| 1,232
|
py
|
Python
|
gym/spaces/multi_discrete.py
|
MrJayK/gym
|
49d33fa83c6b65859d584939dc6e72f1ad36882d
|
[
"Python-2.0",
"OLDAP-2.7"
] | 10
|
2019-10-20T21:27:41.000Z
|
2021-06-11T23:38:45.000Z
|
gym/spaces/multi_discrete.py
|
MrJayK/gym
|
49d33fa83c6b65859d584939dc6e72f1ad36882d
|
[
"Python-2.0",
"OLDAP-2.7"
] | 4
|
2021-06-08T20:53:32.000Z
|
2022-03-12T00:13:40.000Z
|
gym/spaces/multi_discrete.py
|
MrJayK/gym
|
49d33fa83c6b65859d584939dc6e72f1ad36882d
|
[
"Python-2.0",
"OLDAP-2.7"
] | 3
|
2020-03-05T05:30:12.000Z
|
2021-04-22T05:37:05.000Z
|
import gym
import numpy as np
from .space import Space
class MultiDiscrete(Space):
    """A product of discrete spaces: coordinate i takes integer values in
    ``[0, nvec[i])``.
    """

    def __init__(self, nvec):
        """
        nvec: vector of counts of each categorical variable
        """
        assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
        self.nvec = np.asarray(nvec, dtype=np.uint32)
        super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
        self.np_random = np.random.RandomState()

    def seed(self, seed):
        # Re-seed the space's private RNG so sample() becomes reproducible.
        self.np_random.seed(seed)

    def sample(self):
        # Scale uniform [0, 1) draws up to [0, nvec) and truncate to ints.
        return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)

    def contains(self, x):
        # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x
        # is within correct bounds for space dtype (even though x does not have to be unsigned)
        return (0 <= x).all() and (x < self.nvec).all()

    def to_jsonable(self, sample_n):
        return [sample.tolist() for sample in sample_n]

    def from_jsonable(self, sample_n):
        return np.array(sample_n)

    def __repr__(self):
        return "MultiDiscrete({})".format(self.nvec)

    def __eq__(self, other):
        # Fix: comparing against a non-MultiDiscrete object used to raise
        # AttributeError (no `.nvec`); per the __eq__ contract, unrelated
        # types now compare unequal instead of crashing.
        return isinstance(other, MultiDiscrete) and np.all(self.nvec == other.nvec)
| 31.589744
| 96
| 0.641234
|
4a0630e70d1a843d4401cf9d0011ee9b231b613d
| 3,032
|
py
|
Python
|
minitests/timfuz/node_unique.py
|
nelsobe/litghost
|
557d047316f7cd7ddfabc17f54ff5b1a47b564d2
|
[
"0BSD"
] | 1
|
2020-10-31T19:32:41.000Z
|
2020-10-31T19:32:41.000Z
|
minitests/timfuz/node_unique.py
|
nelsobe/prjxray
|
557d047316f7cd7ddfabc17f54ff5b1a47b564d2
|
[
"0BSD"
] | null | null | null |
minitests/timfuz/node_unique.py
|
nelsobe/prjxray
|
557d047316f7cd7ddfabc17f54ff5b1a47b564d2
|
[
"0BSD"
] | 1
|
2021-03-08T21:02:02.000Z
|
2021-03-08T21:02:02.000Z
|
import re
def gen_nodes(fin):
    """Yield one parsed record per line of a node dump.

    Each input line consists of whitespace-separated NAME:VALUE pairs, e.g.:

        NAME:LIOB33_SING_X0Y199/IOB_IBUF0 IS_BAD:0 NUM_WIRES:2 PIN_WIRE:1

    COST_CODE and SPEED_CLASS values are converted to int; everything else
    stays a string.  The NAME field is additionally split into 'tile_type',
    'xy' and 'wname' entries, and the raw stripped line is kept under 'l'.
    """
    for raw in fin:
        raw = raw.strip()
        record = {}
        for pair in raw.split():
            key, value = pair.split(':')
            # Only these two fields are numeric.
            if key in ('COST_CODE', 'SPEED_CLASS'):
                value = int(value)
            record[key] = value
        tile_type, xy, wname = re.match(
            r'(.*)_(X[0-9]*Y[0-9]*)/(.*)', record['NAME']).groups()
        record['tile_type'] = tile_type
        record['xy'] = xy
        record['wname'] = wname
        record['l'] = raw
        yield record
def run(node_fin, verbose=0):
    """Report which attributes are consistent across nodes with the same name.

    Streams records from gen_nodes(node_fin).  The first record seen for each
    key (wire name) becomes the reference; later records with the same key
    are compared attribute by attribute and mismatches are printed.

    NOTE: this file is Python 2 (print statements).
    """
    refnodes = {}
    nodei = 0
    for nodei, anode in enumerate(gen_nodes(node_fin)):
        # Nodes are keyed by wire name only; the (tile_type, wname) variant
        # was tried and left commented out.
        def getk(anode):
            return anode['wname']
            #return (anode['tile_type'], anode['wname'])
        # Progress indicator — the dumps are large.
        if nodei % 1000 == 0:
            print 'Check node %d' % nodei
        # Existing node?
        try:
            refnode = refnodes[getk(anode)]
        except KeyError:
            # Set as reference
            refnodes[getk(anode)] = anode
            continue
        # Verify equivilence
        for k in (
                'SPEED_CLASS',
                'COST_CODE',
                'COST_CODE_NAME',
                'IS_BAD',
                'IS_COMPLETE',
                'IS_GND',
                'IS_VCC',
        ):
            if k in refnode and k in anode:
                # Prints the mismatching attribute plus both raw lines.
                def fail():
                    print 'Mismatch on %s' % k
                    print refnode[k], anode[k]
                    print refnode['l']
                    print anode['l']
                    #assert 0
                if k == 'SPEED_CLASS':
                    # Parameters known to effect SPEED_CLASS
                    # Verify at least one parameter is different
                    if refnode[k] != anode[k]:
                        for k2 in ('IS_PIN', 'IS_INPUT_PIN', 'IS_OUTPUT_PIN',
                                   'PIN_WIRE', 'NUM_WIRES'):
                            if refnode[k2] != anode[k2]:
                                break
                        # for/else: no explaining parameter differed — report.
                        else:
                            if 0:
                                print
                            fail()
                elif refnode[k] != anode[k]:
                    print
                    fail()
            # A key in one but not the other?
            elif k in refnode or k in anode:
                assert 0
if __name__ == '__main__':
    # Command-line front-end: parse arguments, then stream the node dump
    # (stdin by default) through run().
    import argparse

    cli = argparse.ArgumentParser(
        description=
        'Determines which info is consistent across nodes with the same name')
    cli.add_argument('--verbose', type=int, help='')
    cli.add_argument('node_fn_in', default='/dev/stdin', nargs='?',
                     help='Input file')
    args = cli.parse_args()
    run(open(args.node_fn_in, 'r'), verbose=args.verbose)
| 29.436893
| 78
| 0.441953
|
4a06315ea75076de2e62efb983dd38142d868bbf
| 3,460
|
py
|
Python
|
tests/test_slack.py
|
rabibh/blackbox
|
c77dac14449a4a3d74ee3d2b00170b3085642bbf
|
[
"MIT"
] | 1
|
2021-09-11T04:35:06.000Z
|
2021-09-11T04:35:06.000Z
|
tests/test_slack.py
|
rabibh/blackbox
|
c77dac14449a4a3d74ee3d2b00170b3085642bbf
|
[
"MIT"
] | null | null | null |
tests/test_slack.py
|
rabibh/blackbox
|
c77dac14449a4a3d74ee3d2b00170b3085642bbf
|
[
"MIT"
] | null | null | null |
import pytest
import requests_mock
from blackbox.exceptions import MissingFields
from blackbox.handlers.notifiers.slack import Slack
WEBHOOK = "https://hooks.slack.com/services/x/x/x"
@pytest.fixture
def mock_valid_slack_config():
    """Mock valid Slack config."""
    # Minimal valid config: only the required `webhook` field.
    return {"webhook": WEBHOOK}
@pytest.fixture
def mock_valid_slack_config_with_block_kit():
    """Mock valid Slack config with block kit usage."""
    # Adds the optional `use_block_kit` flag on top of the required webhook.
    return {"webhook": WEBHOOK, "use_block_kit": True}
@pytest.fixture
def mock_invalid_slack_config():
    """Mock invalid Slack config."""
    # Empty config: missing the required `webhook` field.
    return {}
def test_slack_handler_can_be_instantiated_with_required_fields(mock_valid_slack_config):
    """Test if the slack notifier handler can be instantiated."""
    # Construction must not raise when the required `webhook` is present.
    Slack(**mock_valid_slack_config)
def test_slack_handler_fails_without_required_fields(mock_invalid_slack_config):
    """Test if the slack notifier handler cannot be instantiated with missing fields."""
    # An empty config lacks the required `webhook` field.
    with pytest.raises(MissingFields):
        Slack(**mock_invalid_slack_config)
def test_slack_handler_instantiates_optional_fields(mock_valid_slack_config_with_block_kit):
    """Test if the slack notifier handler instantiates optional fields."""
    slack_instance = Slack(**mock_valid_slack_config_with_block_kit)
    # The optional `use_block_kit` flag must be carried into the config.
    assert slack_instance.config["use_block_kit"] is True
def test_slack_notify(mock_valid_slack_config, report):
    """Test report parsing for slack notifications (legacy attachments format)."""
    slack = Slack(**mock_valid_slack_config)
    slack.report = report
    # Without use_block_kit, the report is rendered as a legacy attachment.
    assert slack._parse_report() == {
        'attachments': [
            {
                'author_icon': 'https://raw.githubusercontent.com/lemonsaurus/blackbox/main/img/blackbox_avatar.png',
                'author_name': 'blackbox',
                'color': '#0FA031',
                'fields': [{'short': True,
                            'title': 'main_mongo',
                            'value': ':white_check_mark: main_s3'}],
                'mrkdwn_in': ['fields'],
                'title': 'Backup'
            }
        ]
    }
    # The webhook POST is mocked, so notify() makes no real network call.
    with requests_mock.Mocker() as m:
        m.post(WEBHOOK)
        slack.notify()
def test_slack_notify_modern(mock_valid_slack_config_with_block_kit, report):
    """Test report parsing for slack notifications (Block Kit format)."""
    slack = Slack(**mock_valid_slack_config_with_block_kit)
    slack.report = report
    # With use_block_kit=True, the report is rendered as Block Kit blocks.
    assert slack._parse_report() == {
        'blocks': [
            {
                'text': {
                    'text': 'Backup', 'type': 'plain_text'
                },
                'type': 'header'
            },
            {
                'fields': [
                    {
                        'text': '*main_mongo*\n:white_check_mark: main_s3', 'type': 'mrkdwn'
                    }
                ], 'type': 'section'
            },
            {
                'elements': [
                    {
                        'alt_text': 'blackbox',
                        'image_url': 'https://raw.githubusercontent.com/lemonsaurus/blackbox/main/img/blackbox_avatar.png',
                        'type': 'image'
                    },
                    {
                        'emoji': True,
                        'text': 'blackbox',
                        'type': 'plain_text'
                    }
                ],
                'type': 'context'
            }
        ]
    }
    # The webhook POST is mocked, so notify() makes no real network call.
    with requests_mock.Mocker() as m:
        m.post(WEBHOOK)
        slack.notify()
| 30.619469
| 123
| 0.568786
|
4a0632224c38f31f67de893e03823ba0e381267a
| 6,626
|
py
|
Python
|
fastapi_amis_admin/crud/base.py
|
cnss63/fastapi_amis_admin
|
52537d153a002b3c468cb2634779010be956a9b4
|
[
"Apache-2.0"
] | null | null | null |
fastapi_amis_admin/crud/base.py
|
cnss63/fastapi_amis_admin
|
52537d153a002b3c468cb2634779010be956a9b4
|
[
"Apache-2.0"
] | null | null | null |
fastapi_amis_admin/crud/base.py
|
cnss63/fastapi_amis_admin
|
52537d153a002b3c468cb2634779010be956a9b4
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Callable, List, Type, Union, Optional
from fastapi import APIRouter, Depends
from pydantic import BaseModel
from starlette import status
from starlette.exceptions import HTTPException
from starlette.requests import Request
from .schema import BaseApiOut, ItemListSchema, CrudEnum, Paginator
from .utils import schema_create_by_schema, paginator_factory
class RouterMixin:
    """Mixin that lazily builds and owns a FastAPI ``APIRouter``.

    When no router is supplied, one is created with a prefix defaulting to
    ``/<classname lowercased>``.  If a permission dependency is configured,
    it is installed first in the router's dependency chain.
    """

    router: APIRouter = None
    router_prefix: Optional[str] = None
    router_permission_depend: Callable = None

    def __init__(self):
        self.router = self.get_router()

    def get_router(self) -> APIRouter:
        if self.router is None:
            if self.router_prefix is None:
                # Derive the default prefix from the concrete subclass name.
                self.router_prefix = '/' + self.__class__.__name__.lower()
            # Tag the router with the prefix minus its leading slash.
            self.router = APIRouter(prefix=self.router_prefix,
                                    tags=[self.router_prefix[1:]])
        if self.router_permission_depend is not None:
            # The permission check must run before every other dependency.
            self.router.dependencies.insert(0, Depends(self.router_permission_depend))
        return self.router

    def error_no_router_permission(self, request: Request):
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED,
                            detail='No router permissions')
class BaseCrud(RouterMixin):
    """Abstract CRUD router.

    Registers list/read/create/update/delete API routes for a pydantic
    schema and exposes permission hooks and error helpers for subclasses to
    override.  The route handlers themselves (route_list etc.) are abstract.
    """

    schema_model: Type[BaseModel] = None      # the primary data schema
    schema_list: Type[BaseModel] = None       # response schema for /list
    schema_filter: Type[BaseModel] = None     # request filter schema
    schema_create: Type[BaseModel] = None     # request schema for create
    schema_read: Type[BaseModel] = None       # response schema for read
    schema_update: Type[BaseModel] = None     # request schema for update
    pk_name: str = 'id'                       # primary-key field name
    list_per_page_max: int = None             # page-size cap for /list

    def __init__(self, schema_model: Type[BaseModel], router: APIRouter = None):
        self.paginator: Type[Paginator] = Paginator
        self.schema_model = schema_model or self.schema_model
        assert self.schema_model, 'schema_model is None'
        self.router = router
        RouterMixin.__init__(self)

    @property
    def router_prefix(self):
        # Overrides the RouterMixin class attribute with a computed value
        # derived from the schema model name; never None, so RouterMixin's
        # fallback assignment is never triggered.
        return '/' + self.schema_model.__name__.lower()

    @property
    def schema_name_prefix(self):
        # Used to name the auto-derived Filter/Update schemas.
        return self.__class__.__name__

    def register_crud(self,
                      schema_list: Type[BaseModel] = None,
                      schema_filter: Type[BaseModel] = None,
                      schema_create: Type[BaseModel] = None,
                      schema_read: Type[BaseModel] = None,
                      schema_update: Type[BaseModel] = None,
                      list_max_per_page: int = None,
                      depends_list: List[Depends] = None,
                      depends_read: List[Depends] = None,
                      depends_create: List[Depends] = None,
                      depends_update: List[Depends] = None,
                      depends_delete: List[Depends] = None
                      ) -> "BaseCrud":
        """Resolve the CRUD schemas (falling back to derived defaults) and
        register the five CRUD routes on the router.  Returns self so calls
        can be chained.
        """
        self.schema_list = schema_list or self.schema_list or self.schema_model
        # Filter schema defaults to the list schema with all fields optional.
        self.schema_filter = schema_filter or self.schema_filter or schema_create_by_schema(
            self.schema_list, self.schema_name_prefix + 'Filter', set_none=True)
        self.schema_create = schema_create or self.schema_create or self.schema_model
        self.schema_read = schema_read or self.schema_read or self.schema_model
        # Update schema defaults to the model minus its primary key, with
        # all fields optional.
        self.schema_update = schema_update or self.schema_update or \
                             schema_create_by_schema(self.schema_model, self.schema_name_prefix + 'Update',
                                                     exclude={self.pk_name}, set_none=True)
        self.list_per_page_max = list_max_per_page or self.list_per_page_max
        self.paginator = paginator_factory(perPage_max=self.list_per_page_max)
        # POST /list — paginated, filtered listing.
        self.router.add_api_route("/list",
                                  self.route_list,
                                  methods=["POST"],
                                  response_model=BaseApiOut[ItemListSchema[self.schema_list]],
                                  dependencies=depends_list,
                                  name=CrudEnum.list.value)
        # GET /item/{item_id} — read one or many items by id.
        self.router.add_api_route(
            "/item/{item_id}",
            self.route_read,
            methods=["GET"],
            response_model=BaseApiOut[Union[self.schema_read, List[self.schema_read]]],
            dependencies=depends_read,
            name=CrudEnum.read.value
        )
        # POST /item — create.
        self.router.add_api_route(
            "/item",
            self.route_create,
            methods=["POST"],
            response_model=BaseApiOut[Union[self.schema_model, int]],
            dependencies=depends_create,
            name=CrudEnum.create.value
        )
        # PUT /item/{item_id} — update; responds with an affected-row count.
        self.router.add_api_route(
            "/item/{item_id}",
            self.route_update,
            methods=["PUT"],
            response_model=BaseApiOut[int],
            dependencies=depends_update,
            name=CrudEnum.update.value
        )
        # DELETE /item/{item_id} — delete; responds with an affected-row count.
        self.router.add_api_route(
            "/item/{item_id}",
            self.route_delete,
            methods=["DELETE"],
            response_model=BaseApiOut[int],
            dependencies=depends_delete,
            name=CrudEnum.delete.value
        )
        return self

    # Abstract route handlers: concrete subclasses return the actual
    # endpoint callables (each property yields a function FastAPI can mount).
    @property
    def route_list(self) -> Callable[..., Any]:
        raise NotImplementedError

    @property
    def route_read(self) -> Callable[..., Any]:
        raise NotImplementedError

    @property
    def route_create(self) -> Callable[..., Any]:
        raise NotImplementedError

    @property
    def route_update(self) -> Callable[..., Any]:
        raise NotImplementedError

    @property
    def route_delete(self) -> Callable[..., Any]:
        raise NotImplementedError

    # Permission hooks: default to allow-all; subclasses override.
    async def has_list_permission(self, request: Request, paginator: Optional[Paginator], filter: Optional[BaseModel],
                                  **kwargs) -> bool:
        return True

    async def has_create_permission(self, request: Request, obj: Optional[BaseModel], **kwargs) -> bool:
        return True

    async def has_read_permission(self, request: Request, item_id: Optional[List[str]], **kwargs) -> bool:
        return True

    async def has_update_permission(self, request: Request, item_id: Optional[List[str]], obj: Optional[BaseModel],
                                    **kwargs) -> bool:
        return True

    async def has_delete_permission(self, request: Request, item_id: Optional[List[str]], **kwargs) -> bool:
        return True

    # Error helpers shared by route implementations.
    def error_key_exists(self, request: Request):
        raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Key already exists")

    def error_data_handle(self, request: Request):
        raise HTTPException(status.HTTP_400_BAD_REQUEST, "error data handle")
| 40.402439
| 118
| 0.626471
|
4a0632baa6d79f02c02ca8df4d3ecea746c9b382
| 1,785
|
py
|
Python
|
python_modules/dagstermill/setup.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagstermill/setup.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagstermill/setup.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
from setuptools import find_packages, setup
def get_version(name):
    """Read the package version out of dagstermill/version.py.

    Returns __version__ for 'dagstermill' and __nightly__ for
    'dagstermill-nightly'; any other name raises.
    """
    scope = {}
    with open('dagstermill/version.py') as fp:
        exec(fp.read(), scope)  # pylint: disable=W0122
    if name == 'dagstermill':
        return scope['__version__']
    if name == 'dagstermill-nightly':
        return scope['__nightly__']
    raise Exception('Shouldn\'t be here: bad package name {name}'.format(name=name))
# Module-level CLI parser: `--nightly` selects the nightly package name.
parser = argparse.ArgumentParser()
parser.add_argument('--nightly', action='store_true')
def _do_setup(name='dagstermill'):
    """Invoke setuptools for either the regular or the nightly package name."""
    setup(
        name=name,
        version=get_version(name),
        author='Elementl',
        license='Apache-2.0',
        packages=find_packages(exclude=['dagstermill_tests']),
        classifiers=[
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
        ],
        install_requires=[
            'dagster',
            'dagster-pandas',
            'enum-compat>=0.0.1',
            'future>=0.16.0, <0.17.0a0',
            'ipykernel>=4.9.0',
            'nteract-scrapbook>=0.2.0',
            'papermill>=1.0.0',
            # NOTE(review): an exact pin on scikit-learn is unusual for a
            # library dependency — confirm it is intentional.
            'scikit-learn==0.20.3',
            'six>=1.11.0',
        ],
        entry_points={'console_scripts': ['dagstermill = dagstermill.cli:main']},
    )
if __name__ == '__main__':
    # Strip our custom --nightly flag before handing the remaining argv to
    # setuptools, which does its own argument parsing.
    parsed, unparsed = parser.parse_known_args()
    sys.argv = [sys.argv[0]] + unparsed
    if parsed.nightly:
        _do_setup('dagstermill-nightly')
    else:
        _do_setup('dagstermill')
| 29.262295
| 88
| 0.57591
|
4a0633709d7e215f9955c8d05768d860f4e41908
| 995
|
py
|
Python
|
Lib/site-packages/rest_framework_simplejwt/exceptions.py
|
Priyansh863/e-backend
|
c8c1bb8a7c0de96febacfeec76249256c8df3303
|
[
"bzip2-1.0.6"
] | 4
|
2019-12-09T08:53:12.000Z
|
2020-10-29T21:41:40.000Z
|
rest_framework_simplejwt/exceptions.py
|
urantialife/django-rest-framework-simplejwt
|
89cdeaa76f99aea6bc208bf914df35034169ddc7
|
[
"BSD-2-Clause",
"MIT"
] | 21
|
2021-03-11T00:34:32.000Z
|
2022-03-12T00:39:44.000Z
|
rest_framework_simplejwt/exceptions.py
|
urantialife/django-rest-framework-simplejwt
|
89cdeaa76f99aea6bc208bf914df35034169ddc7
|
[
"BSD-2-Clause",
"MIT"
] | 17
|
2019-09-11T10:30:54.000Z
|
2020-12-03T06:19:00.000Z
|
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, status
class TokenError(Exception):
    """Raised when a token cannot be validated or processed."""
    pass
class TokenBackendError(Exception):
    """Raised when the token backend fails to encode or decode a token."""
    pass
class DetailDictMixin:
    """Exception mixin that normalizes the ``detail`` payload into a dict.

    The resulting detail always carries 'detail' and 'code' keys, seeded
    from the class's `default_detail`/`default_code` and overridden by the
    constructor arguments.
    """

    def __init__(self, detail=None, code=None):
        """
        Builds a detail dictionary for the error to give more information to API
        users.
        """
        payload = {'detail': self.default_detail, 'code': self.default_code}
        if isinstance(detail, dict):
            # A dict detail is merged over the defaults wholesale.
            payload.update(detail)
        else:
            if detail is not None:
                payload['detail'] = detail
        if code is not None:
            payload['code'] = code
        super().__init__(payload)
class AuthenticationFailed(DetailDictMixin, exceptions.AuthenticationFailed):
    """DRF AuthenticationFailed carrying the dict-shaped detail from DetailDictMixin."""
    pass
class InvalidToken(AuthenticationFailed):
    """Authentication failure for a token that did not validate."""
    # 401 signals that the presented credential (the token) was rejected.
    status_code = status.HTTP_401_UNAUTHORIZED
    default_detail = _('Token is invalid or expired')
    default_code = 'token_not_valid'
| 24.875
| 80
| 0.686432
|
4a0633aceb2900f07621369b8df4b6b02bc627b9
| 2,829
|
py
|
Python
|
docs/code/Tours.py
|
bjrnmath/debuggingbook
|
8b6cd36fc75a89464e9252e40e1d4edcb6a70559
|
[
"MIT"
] | null | null | null |
docs/code/Tours.py
|
bjrnmath/debuggingbook
|
8b6cd36fc75a89464e9252e40e1d4edcb6a70559
|
[
"MIT"
] | null | null | null |
docs/code/Tours.py
|
bjrnmath/debuggingbook
|
8b6cd36fc75a89464e9252e40e1d4edcb6a70559
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Tours through the Book" - a chapter of "The Debugging Book"
# Web site: https://www.debuggingbook.org/html/Tours.html
# Last change: 2021-05-11 15:25:24+02:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Debugging Book - Tours through the Book
This file can be _executed_ as a script, running all experiments:
$ python Tours.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from debuggingbook.Tours import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.debuggingbook.org/html/Tours.html
For more details, source, and documentation, see
"The Debugging Book - Tours through the Book"
at https://www.debuggingbook.org/html/Tours.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
# NOTE(review): this file is auto-generated from a notebook (see header);
# each `if __name__ == '__main__'` guard below mirrors one notebook cell and
# only runs when the module is executed as a script.
if __name__ == '__main__' and __package__ is None:
    __package__ = 'debuggingbook'
# Tours through the Book
# ======================
if __name__ == '__main__':
    print('# Tours through the Book')
from .bookutils import rich_output
if __name__ == '__main__':
    # Only load the sitemap graphic when running in a rich (notebook) frontend.
    if rich_output():
        from IPython.display import SVG
        sitemap = SVG(filename='PICS/Sitemap.svg')
    else:
        sitemap = None
    sitemap
## The Pragmatic Programmer Tour
## -----------------------------
if __name__ == '__main__':
    print('\n## The Pragmatic Programmer Tour')
## The Young Researcher Tour
## -------------------------
if __name__ == '__main__':
    print('\n## The Young Researcher Tour')
## Lessons Learned
## ---------------
if __name__ == '__main__':
    print('\n## Lessons Learned')
| 28.867347
| 73
| 0.702368
|
4a0634954931ca16ac1471a6893421074969ce22
| 1,464
|
py
|
Python
|
elastalert/rule_from_kibana.py
|
perceptron01/elastalert2
|
bb91ecdb03dedda207237ca83d628fd5d40d29c6
|
[
"Apache-2.0"
] | 8,191
|
2015-02-20T19:24:34.000Z
|
2022-03-31T15:51:07.000Z
|
elastalert/rule_from_kibana.py
|
perceptron01/elastalert2
|
bb91ecdb03dedda207237ca83d628fd5d40d29c6
|
[
"Apache-2.0"
] | 2,793
|
2015-02-20T20:58:50.000Z
|
2022-03-30T23:19:13.000Z
|
elastalert/rule_from_kibana.py
|
perceptron01/elastalert2
|
bb91ecdb03dedda207237ca83d628fd5d40d29c6
|
[
"Apache-2.0"
] | 2,176
|
2015-02-27T05:45:22.000Z
|
2022-03-31T04:38:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import yaml
from elastalert.kibana import filters_from_dashboard
from elastalert.util import elasticsearch_client
def main():
    """Interactively build a partial ElastAlert rule from a Kibana dashboard.

    Prompts for the Elasticsearch connection details and a dashboard name,
    fetches the dashboard document from the `kibana-int` index, converts its
    filters, and prints a partial YAML rule config to stdout.
    """
    host = input("Elasticsearch host: ")
    port = input("Elasticsearch port: ")
    dashboard_name = input("Dashboard name: ")
    method = input("Method for querying Elasticsearch[GET]: ") or 'GET'

    es = elasticsearch_client({'es_host': host, 'es_port': port,
                               'send_get_body_as': method})
    print("Elastic Version:" + es.es_version)

    query = {'query': {'term': {'_id': dashboard_name}}}
    # The `_source_include(s)` kwarg was renamed in newer Elasticsearch
    # clients, hence the version branch.
    if es.is_atleastsixsix():
        # TODO check support for kibana 7
        # TODO use doc_type='_doc' instead
        res = es.deprecated_search(index='kibana-int', doc_type='dashboard',
                                   body=query, _source_includes=['dashboard'])
    else:
        res = es.deprecated_search(index='kibana-int', doc_type='dashboard',
                                   body=query, _source_include=['dashboard'])

    if not res['hits']['hits']:
        print("No dashboard %s found" % (dashboard_name))
        exit()

    db = json.loads(res['hits']['hits'][0]['_source']['dashboard'])
    config_filters = filters_from_dashboard(db)

    print("\nPartial Config file")
    print("-----------\n")
    print("name: %s" % (dashboard_name))
    print("es_host: %s" % (host))
    print("es_port: %s" % (port))
    print("filter:")
    print(yaml.safe_dump(config_filters))
# Script entry point: only run the interactive converter when executed directly.
if __name__ == '__main__':
    main()
| 30.5
| 120
| 0.648224
|
4a0634d974af01f4662c97e17b3ddc8a26fcaccd
| 2,751
|
py
|
Python
|
backend/blueprints/spa_api/service_layers/replay/groups.py
|
dbauducco/DistributedReplays
|
07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291
|
[
"Apache-2.0"
] | 69
|
2018-07-17T19:40:21.000Z
|
2022-02-25T14:23:53.000Z
|
backend/blueprints/spa_api/service_layers/replay/groups.py
|
dbauducco/DistributedReplays
|
07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291
|
[
"Apache-2.0"
] | 335
|
2018-07-25T19:34:55.000Z
|
2022-02-26T06:04:32.000Z
|
backend/blueprints/spa_api/service_layers/replay/groups.py
|
dbauducco/DistributedReplays
|
07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291
|
[
"Apache-2.0"
] | 42
|
2018-07-21T00:04:23.000Z
|
2022-02-25T14:23:42.000Z
|
import logging
from typing import List
from backend.database.wrapper import player_wrapper
from backend.database.wrapper.chart.chart_data import ChartData, ChartDataPoint
from backend.database.wrapper.chart.player_chart_metadata import player_group_stats_metadata
from backend.database.wrapper.chart.stat_point import StatDataPoint
from backend.database.wrapper.stats import player_stat_wrapper
logger = logging.getLogger(__name__)
# Module-level stats wrapper shared by all chart builds; limit=10 caps the
# underlying player query size.
wrapper = player_stat_wrapper.PlayerStatWrapper(player_wrapper.PlayerWrapper(limit=10))
class ReplayGroupChartData(ChartData):
    """Chart data for a group of replays, tagged with a chart type and subcategory."""

    def __init__(self, title: str, chart_data_points: List[ChartDataPoint], type_: str, subcategory: str):
        super().__init__(title, chart_data_points)
        # `type_` has a trailing underscore to avoid shadowing the builtin.
        self.type = type_
        self.subcategory = subcategory

    @staticmethod
    def create_from_ids(ids: List[str]) -> List['ReplayGroupChartData']:
        """Build one chart per (metric, stat category) for the given replay ids.

        Charts whose data points are all zero/None are omitted from the result.
        """
        stats = wrapper.get_group_stats(ids)
        player_stats = stats['playerStats']
        player_names = [player['name'] for player in player_stats]
        if 'ensembleStats' in stats:
            # Appending the ensemble entry lets it be charted like a player.
            player_stats.append(stats['ensembleStats'])
        categories = list(player_stats[0]['stats'].keys())
        all_chart_data = []
        for chart_metadata in player_group_stats_metadata:
            for category in categories:
                chart_data_points = []
                for player in player_stats:
                    # Entries without a recognized player name (e.g. the
                    # appended ensemble stats) are labelled 'Ensemble'.
                    name = player['name'] if 'name' in player and player['name'] in player_names else 'Ensemble'
                    # Missing metrics default to 0 so every player gets a point.
                    value = player['stats'][category].get(chart_metadata.stat_name, 0)
                    is_orange = player['is_orange'] if 'is_orange' in player else None
                    if is_orange is not None:
                        chart_data_points.append(StatDataPoint(
                            name=name,
                            value=value,
                            is_orange=is_orange
                        ))
                    else:
                        chart_data_points.append(ChartDataPoint(
                            name=name,
                            value=value
                        ))
                chart_data = ReplayGroupChartData(
                    title=chart_metadata.stat_name + ' ' + category,
                    chart_data_points=chart_data_points,
                    type_=chart_metadata.type,
                    subcategory=chart_metadata.subcategory
                )
                # Skip charts that carry no information.
                # NOTE(review): this indexes the data points with ['value'],
                # i.e. assumes ChartDataPoint supports dict-style access —
                # confirm against ChartDataPoint's definition.
                if all(chart_data_point['value'] is None or chart_data_point['value'] == 0 for chart_data_point in
                       chart_data.chartDataPoints):
                    continue
                all_chart_data.append(chart_data)
        return all_chart_data
| 41.681818
| 114
| 0.606325
|
4a0635175b796a2efcc7e0e003bdcd883b3c737a
| 1,471
|
py
|
Python
|
taxjar/data/breakdown_line_item.py
|
danpalmer/taxjar-python
|
f22867bede037970970394d4c2a7635e9ec28ae9
|
[
"MIT"
] | 24
|
2015-06-22T19:32:18.000Z
|
2022-03-28T17:51:47.000Z
|
taxjar/data/breakdown_line_item.py
|
danpalmer/taxjar-python
|
f22867bede037970970394d4c2a7635e9ec28ae9
|
[
"MIT"
] | 14
|
2017-05-16T15:26:15.000Z
|
2022-03-17T08:06:51.000Z
|
taxjar/data/breakdown_line_item.py
|
danpalmer/taxjar-python
|
f22867bede037970970394d4c2a7635e9ec28ae9
|
[
"MIT"
] | 15
|
2017-05-04T13:42:23.000Z
|
2022-03-12T18:14:36.000Z
|
from jsonobject import JsonObject
from taxjar.data.float_property import TaxJarFloatProperty
class TaxJarBreakdownLineItem(JsonObject):
    """Per-line-item tax breakdown, grouped by jurisdiction level
    (US state/county/city/special district, country, and Canadian GST/PST/QST).
    """
    # NB: can return either string or integer
    # `id` is a valid property, but isn't enforced here
    # id = StringProperty()
    # Line-item totals across all jurisdictions.
    taxable_amount = TaxJarFloatProperty()
    tax_collectable = TaxJarFloatProperty()
    combined_tax_rate = TaxJarFloatProperty()
    # US state-level breakdown.
    state_taxable_amount = TaxJarFloatProperty()
    state_sales_tax_rate = TaxJarFloatProperty()
    state_amount = TaxJarFloatProperty()
    # US county-level breakdown.
    county_taxable_amount = TaxJarFloatProperty()
    county_tax_rate = TaxJarFloatProperty()
    county_amount = TaxJarFloatProperty()
    # US city-level breakdown.
    city_taxable_amount = TaxJarFloatProperty()
    city_tax_rate = TaxJarFloatProperty()
    city_amount = TaxJarFloatProperty()
    # US special-district breakdown.
    special_district_taxable_amount = TaxJarFloatProperty()
    special_tax_rate = TaxJarFloatProperty()
    special_district_amount = TaxJarFloatProperty()
    # Country-level breakdown (international orders).
    country_taxable_amount = TaxJarFloatProperty()
    country_tax_rate = TaxJarFloatProperty()
    country_tax_collectable = TaxJarFloatProperty()
    # Canadian GST breakdown.
    gst_taxable_amount = TaxJarFloatProperty()
    gst_tax_rate = TaxJarFloatProperty()
    gst = TaxJarFloatProperty()
    # Canadian PST breakdown.
    pst_taxable_amount = TaxJarFloatProperty()
    pst_tax_rate = TaxJarFloatProperty()
    pst = TaxJarFloatProperty()
    # Canadian QST breakdown.
    qst_taxable_amount = TaxJarFloatProperty()
    qst_tax_rate = TaxJarFloatProperty()
    qst = TaxJarFloatProperty()
| 40.861111
| 59
| 0.774303
|
4a0636656e9d56bcef81330f4eb0e8bca6d34297
| 266
|
py
|
Python
|
test/tests/wrapperdesc.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/tests/wrapperdesc.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/tests/wrapperdesc.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
class C(object):
    """Empty new-style class used to probe inherited wrapper descriptors."""
    pass
# Python 2 identity probes over slot-wrapper descriptors: which classes share
# object's __str__ descriptor, and what objects descriptor binding produces.
# This appears to be an interpreter regression test whose printed output is
# compared against a reference run.
print C.__str__ is object.__str__
print type(C).__str__ is object.__str__
print type(None).__str__ is object.__str__
print type(None).__str__ is None.__str__
print type(None.__str__)
print type(type(None).__str__.__get__(None, type(None)))
| 29.555556
| 56
| 0.778195
|
4a063704b699ec091d828903fe2a9d5a5a5a3a24
| 12,689
|
py
|
Python
|
remo/events/migrations/0014_auto__del_field_eventmetricoutcome_outcome__add_field_eventmetricoutco.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
remo/events/migrations/0014_auto__del_field_eventmetricoutcome_outcome__add_field_eventmetricoutco.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
remo/events/migrations/0014_auto__del_field_eventmetricoutcome_outcome__add_field_eventmetricoutco.py
|
glogiotatidis/remo
|
1c4f55c63c8d03cbee776b60af042b8068d9f297
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Deleting field 'EventMetricOutcome.outcome'
        # NOTE(review): despite the auto-generated comment above, this is a
        # rename, not a delete: `outcome` becomes `expected_outcome`.
        db.rename_column(u'events_eventmetricoutcome', 'outcome', 'expected_outcome')
def backwards(self, orm):
# Adding field 'EventMetricOutcome.outcome'
db.rename_column(u'events_eventmetricoutcome', 'expected_outcome', 'outcome')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.attendance': {
'Meta': {'object_name': 'Attendance'},
'date_subscribed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'events.event': {
'Meta': {'ordering': "['start']", 'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'events_attended'", 'symmetrical': 'False', 'through': u"orm['events.Attendance']", 'to': u"orm['auth.User']"}),
'budget_bug': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_budget_requests'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['remozilla.Bug']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'events_categories'", 'symmetrical': 'False', 'to': u"orm['profiles.FunctionalArea']"}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'converted_visitors': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'estimated_attendance': ('django.db.models.fields.PositiveIntegerField', [], {}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'extra_content': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'goals': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'events_goals'", 'symmetrical': 'False', 'to': u"orm['events.EventGoal']"}),
'hashtag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'metrics': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['events.EventMetric']", 'through': u"orm['events.EventMetricOutcome']", 'symmetrical': 'False'}),
'mozilla_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events_created'", 'to': u"orm['auth.User']"}),
'planning_pad_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'swag_bug': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_swag_requests'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['remozilla.Bug']"}),
'times_edited': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'venue': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'events.eventcomment': {
'Meta': {'ordering': "['id']", 'object_name': 'EventComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'events.eventgoal': {
'Meta': {'ordering': "['name']", 'object_name': 'EventGoal'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '127', 'blank': 'True'})
},
u'events.eventmetric': {
'Meta': {'ordering': "['name']", 'object_name': 'EventMetric'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.eventmetricoutcome': {
'Meta': {'object_name': 'EventMetricOutcome'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
'expected_outcome': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.EventMetric']"})
},
u'events.metric': {
'Meta': {'object_name': 'Metric'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'outcome': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
u'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
u'remozilla.bug': {
'Meta': {'ordering': "['-bug_last_change_time']", 'object_name': 'Bug'},
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bugs_assigned'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'bug_creation_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bug_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'bug_last_change_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'cc': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'bugs_cced'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'component': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'council_vote_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bugs_created'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'first_comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resolution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
'summary': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'whiteboard': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500'})
}
}
complete_apps = ['events']
| 80.310127
| 218
| 0.566002
|
4a0637883fe40e5ea7066ae8c883b2e7979be5dc
| 782
|
py
|
Python
|
data/__init__.py
|
yongchaoding/bus_passenger_detection
|
a716631eb6370e795f7ca40a8869d6c4ba290091
|
[
"Apache-2.0"
] | null | null | null |
data/__init__.py
|
yongchaoding/bus_passenger_detection
|
a716631eb6370e795f7ca40a8869d6c4ba290091
|
[
"Apache-2.0"
] | null | null | null |
data/__init__.py
|
yongchaoding/bus_passenger_detection
|
a716631eb6370e795f7ca40a8869d6c4ba290091
|
[
"Apache-2.0"
] | null | null | null |
import torch
def detection_collate(data):
    """Collate a batch whose samples carry variable-length annotations.

    Each sample is an (image, annotations) pair; images share a common shape
    and are stacked into one tensor, while the per-image annotation lists
    (bounding boxes) are kept as a plain Python list because their lengths
    differ between images.
    """
    images = [sample[0] for sample in data]
    labels = [sample[1] for sample in data]
    return torch.stack(images, 0), labels
# Detector configuration for the bus-passenger task. The keys mirror the
# standard SSD300 'VOC' configuration (prior-box sizes per feature map,
# training schedule) — presumably consumed by the prior-box/model code;
# TODO confirm against the network definition.
bus_passenger_cfg = {
    'num_classes': 2,
    'lr_steps': (80000, 100000),  # looks like LR-decay iterations — confirm
    'max_iter': 120000,
    'feature_maps': [38, 19, 10, 5, 3, 1],
    'min_dim': 300,  # input resolution (SSD300)
    'steps': [8, 16, 32, 64, 100, 300],
    'min_sizes': [30, 60, 111, 162, 213, 264],
    'max_sizes': [60, 111, 162, 213, 264, 315],
    'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    'variance': [0.1, 0.2],
    'clip': True,
    'name': 'VOC',
}
| 30.076923
| 81
| 0.575448
|
4a0637bc3e472326933299aaf5ee066b201451d3
| 2,189
|
py
|
Python
|
django_ip_geolocation/middleware.py
|
girishkumarkh/django-ip-geolocation
|
d5e04db42ac9cf17b6b23f2572e3cacd59afec74
|
[
"MIT"
] | 30
|
2019-12-29T13:44:13.000Z
|
2022-03-25T19:26:43.000Z
|
django_ip_geolocation/middleware.py
|
girishkumarkh/django-ip-geolocation
|
d5e04db42ac9cf17b6b23f2572e3cacd59afec74
|
[
"MIT"
] | 28
|
2019-12-28T14:36:12.000Z
|
2021-04-13T08:44:38.000Z
|
django_ip_geolocation/middleware.py
|
girishkumarkh/django-ip-geolocation
|
d5e04db42ac9cf17b6b23f2572e3cacd59afec74
|
[
"MIT"
] | 6
|
2020-01-09T12:25:14.000Z
|
2021-10-20T11:45:42.000Z
|
"""Django middleware."""
import logging
from django.utils.deprecation import MiddlewareMixin # noqa: E501 pylint: disable=import-error
from django_ip_geolocation.utils import get_geolocation, set_cookie, \
clean_geolocation_data, is_user_consented
from django_ip_geolocation.settings import IP_GEOLOCATION_SETTINGS as _settings # noqa: E501
class IpGeolocationMiddleware(MiddlewareMixin):
    """Middleware that geolocates the client IP and exposes the result.

    Depending on IP_GEOLOCATION_SETTINGS the data is attached to the incoming
    request (``request.geolocation``), to a response header, and/or to a
    cookie — but only when the user has consented (``is_user_consented``).
    All failures are logged and swallowed so geolocation stays best-effort.
    """
    def __init__(self, get_response=None):  # noqa: D107
        # NOTE(review): the geolocation result is cached on the middleware
        # instance, which Django shares across all requests — under threaded
        # or async serving another request could observe stale data between
        # process_request and process_response; confirm this is acceptable.
        self._geolocation_data = None
        super(IpGeolocationMiddleware, self).__init__(get_response)
    def process_request(self, request):
        """Attach geolocation data to the request (request hook)."""
        try:
            if not _settings.get('ENABLE_REQUEST_HOOK'):
                return
            if not is_user_consented(request):
                return
            self._get_geolocation(request)
            request.geolocation = self._geolocation_data
        except Exception:
            # Best-effort: never break the request cycle over geolocation.
            logging.error("Couldn't geolocate ip", exc_info=True)
    def process_response(self, request, response):
        """Attach geolocation data to the response header and/or cookie."""
        try:
            if not _settings.get('ENABLE_RESPONSE_HOOK') and \
                    not _settings.get('ENABLE_COOKIE'):
                return response
            if not is_user_consented(request):
                return response
            # Reuse the request-hook result when available; otherwise fetch.
            if self._geolocation_data is None:
                self._get_geolocation(request)
            if _settings.get('ENABLE_RESPONSE_HOOK'):
                # Response hook is enabled
                header = _settings.get('RESPONSE_HEADER')
                response[header] = self._geolocation_data
            if _settings.get('ENABLE_COOKIE'):
                # 'raw_data' is stripped before it goes into the cookie.
                cleaned_geolocation_data = clean_geolocation_data(
                    self._geolocation_data, ['raw_data'])
                set_cookie(response, cleaned_geolocation_data)
        except Exception:
            logging.error("Couldn't geolocate ip", exc_info=True)
        return response
    def _get_geolocation(self, request):
        """Fetch geolocation using the backend defined in settings."""
        self._geolocation_data = get_geolocation(request)
| 35.306452
| 95
| 0.644587
|
4a06385da3137908be5a7f945afaee4d5ea2cd98
| 1,799
|
py
|
Python
|
segmentation/feature_extraction_test.py
|
peternara/graph-based-image-classification-gcn
|
60e93b47691e960b7f06f7a5dc11191efe881178
|
[
"MIT"
] | 44
|
2017-02-26T16:52:48.000Z
|
2022-02-17T18:50:02.000Z
|
segmentation/feature_extraction_test.py
|
hungerzs/graph-based-image-classification
|
d44182c6a28b4ab9a691a9cb1ecd4c3b851875a8
|
[
"MIT"
] | 2
|
2018-11-14T05:11:25.000Z
|
2020-06-23T16:24:41.000Z
|
segmentation/feature_extraction_test.py
|
hungerzs/graph-based-image-classification
|
d44182c6a28b4ab9a691a9cb1ecd4c3b851875a8
|
[
"MIT"
] | 13
|
2018-04-26T07:46:35.000Z
|
2022-02-28T15:38:53.000Z
|
from math import sqrt, pi as PI
import tensorflow as tf
from .feature_extraction import feature_extraction
class FeaturesTest(tf.test.TestCase):
    """Checks feature_extraction against hand-computed values on a 3x3 image."""
    def test_features(self):
        # 3x3 RGB image: one red pixel, three black pixels, five white pixels.
        image = tf.constant([
            [[255, 0, 0], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [255, 255, 255]],
            [[255, 255, 255], [255, 255, 255], [255, 255, 255]],
        ])
        # Two segments: label 0 covers the top-left region (5 px),
        # label 1 the bottom/right region (4 px). Only segment 0's feature
        # vector (f = features[0]) is asserted below.
        segmentation = tf.constant([
            [0, 0, 0],
            [0, 0, 1],
            [1, 1, 1],
        ], dtype=tf.int32)
        # Expected values for segment 0 — looks like spatial moments (M),
        # bounding box, convex area, perimeter, weighted moments (Mw) and
        # per-channel color statistics; confirm against feature_extraction.
        M = [[5, 2, 2, 2],
             [4, 1, 1, 1],
             [6, 1, 1, 1],
             [10, 1, 1, 1]]
        bbox = [2, 3]
        convex_area = 5
        perimeter = 4 + (1 + sqrt(2)) / 2
        Mw = [[1, 0, 0, 0],
              [0, 0, 0, 0],
              [0, 0, 0, 0],
              [0, 0, 0, 0]]
        mean = [51, 0, 0]
        minimum = [0, 0, 0]
        maximum = [255, 0, 0]
        with self.test_session() as sess:
            f = feature_extraction(segmentation, image).eval()[0]
            # Feature vector layout: 16 moments, 2 bbox, area, perimeter,
            # 16 weighted moments, 3 mean, 3 min, 3 max = 45 values.
            self.assertEqual(len(f), 45)
            self.assertAllEqual(f[0:4], M[0])
            self.assertAllEqual(f[4:8], M[1])
            self.assertAllEqual(f[8:12], M[2])
            self.assertAllEqual(f[12:16], M[3])
            self.assertAllEqual(f[16:18], bbox)
            self.assertEqual(f[18], convex_area)
            # Perimeter is compared to 2 decimal places (float arithmetic).
            self.assertEqual(round(f[19]*100), round(perimeter*100))
            self.assertAllEqual(f[20:24], Mw[0])
            self.assertAllEqual(f[24:28], Mw[1])
            self.assertAllEqual(f[28:32], Mw[2])
            self.assertAllEqual(f[32:36], Mw[3])
            self.assertAllEqual(f[36:39], mean[0:3])
            self.assertAllEqual(f[39:42], minimum)
            self.assertAllEqual(f[42:45], maximum)
| 30.491525
| 68
| 0.460256
|
4a063a004314b8713d778a7e0a761a9e1eaca665
| 15,036
|
py
|
Python
|
src/aed/models.py
|
rgrzeszi/bof-aed
|
f7d62e63fdb4460c0bda912ba8037c77a2876cfc
|
[
"MIT"
] | 14
|
2017-11-24T23:34:04.000Z
|
2021-06-29T01:59:27.000Z
|
src/aed/models.py
|
rgrzeszi/bof-aed
|
f7d62e63fdb4460c0bda912ba8037c77a2876cfc
|
[
"MIT"
] | null | null | null |
src/aed/models.py
|
rgrzeszi/bof-aed
|
f7d62e63fdb4460c0bda912ba8037c77a2876cfc
|
[
"MIT"
] | 4
|
2016-11-01T12:33:22.000Z
|
2018-02-10T07:42:59.000Z
|
'''
Copyright (C) 2015 Axel Plinge and Rene Grzeszick.
This file made available under the terms of the MIT license (see the LICENSE file).
For any publication please cite the corresponding paper (see the README file).
'''
import sys
import numpy as np
from sklearn.mixture import GMM
from features import FeatureNormalizer
class BoFModelTemporal(object):
    '''
    Soft quantization using a supervised GMM on input features with temporal
    augmentation: each frame is prepended with a quantized time coordinate.
    Computes normalized/pooled probabilities of Gaussians.
    NOTE(review): this module uses Python 2 syntax (`print >>`); the integer
    divisions below rely on Python 2 semantics.
    '''
    # Scale factor applied to the quantized temporal coordinate before it is
    # prepended to each frame vector.
    _OFFSET_VALUE = 10
    def __init__(self, vocab_size, frames):
        '''
        Initializes the temporal model
        @param vocab_size: size of the overall vocabulary (vocab_size/num_classes per class)
        @param frames: number of frames per temporal step
                       if negative, number of temporal steps == coordinates
        '''
        self.vocab_size = vocab_size
        self.gmm = None      # super codebook (set in train)
        self.gmms = []       # one GMM per class (set in train)
        self.frames = frames
        self.featuresize = vocab_size
        return
    def __augment_temporal(self, features):
        '''
        Add quantized temporal information to the features.
        @param features: input features [frames x dims]
        @return: the augmented features [frames x (1 + dims)]
        '''
        # No temporal information
        temp_features = []
        if self.frames == 0:
            return features
        # number of frames to get the same temporal coordinate
        if self.frames > 0:
            div = self.frames
        else:
            # Python 2 integer division: splits the window into -frames steps.
            div = max(1, (1 + len(features)) / -self.frames)
        # Append temporal coordinates
        for t_i, feat_i in enumerate(features):
            temp_feat = np.hstack([int(t_i / div) * self._OFFSET_VALUE, feat_i])
            temp_features.append(temp_feat)
        temp_features = np.array(temp_features)
        return temp_features
    def train(self, datadict, labels):
        '''
        Trains the model using the given data
        @param datadict: dictonary of label,features
        @param labels: the labels of the datadict in a given order
        '''
        # Stack the features
        allfeatures = np.vstack(list([np.vstack(x) for x in datadict.values()]))
        # Determine the normalisation statistics and remember them
        self.norm = FeatureNormalizer()
        self.norm.setup(allfeatures)
        # Get number of classes
        ncl = len(labels)
        # Compute vocabsize per class (Python 2 integer division)
        vocab_size_per_cl = max(1, self.vocab_size / ncl)
        # Update vocabsize to account for rounding errors
        self.vocab_size = vocab_size_per_cl * ncl
        #
        # Initialize complete GMM (used for supercodebook)
        # This will later on be overwritten and is
        # a workaround to pre-initialize an sklearn GMM
        #
        self.gmms = []
        print >> sys.stderr, 'Initialising GMM with', self.vocab_size, 'components,', vocab_size_per_cl, 'per class.'
        self.gmm = GMM(self.vocab_size, n_iter=2, params='w')
        # Initialize by fitting with ones (+1 dim for the temporal coordinate)
        self.gmm.fit(np.ones((self.vocab_size, allfeatures.shape[1] + 1)))
        #
        # For each class train a GMM
        #
        index = 0
        for label in labels:
            # Compute feature representations (normalize, then add time)
            temp_feat_reprs = []
            for feat in datadict[label]:
                feat = self.norm.normalize(feat)
                feat = self.__augment_temporal(feat)
                temp_feat_reprs.append(feat)
            temp_feat_reprs = np.vstack(temp_feat_reprs)
            print >> sys.stderr, ("Training a GMM for label %s with %d densities, using data of shape %s"
                                  % (label, vocab_size_per_cl, str(np.shape(temp_feat_reprs))))
            # Train the GMM
            gmm = GMM(vocab_size_per_cl, covariance_type='diag')
            gmm.fit(temp_feat_reprs)
            # Overwrite values from supervised codebook GMM by class GMMs
            # (each class owns a contiguous slice of the super codebook)
            self.gmm.means_ [index * vocab_size_per_cl:(index + 1) * vocab_size_per_cl, :] = gmm.means_
            self.gmm.covars_ [index * vocab_size_per_cl:(index + 1) * vocab_size_per_cl, :] = gmm.covars_
            self.gmm.weights_[index * vocab_size_per_cl:(index + 1) * vocab_size_per_cl] = gmm.weights_ / float(ncl)
            # Append the GMM
            self.gmms.append(gmm)
            index += 1
        # Set uniform GMM weights (overrides the per-class weights above)
        self.gmm.weights_ = np.ones(self.vocab_size) / self.vocab_size
        # Set feature size
        self.featuresize = self.vocab_size
        return
    def getfeaturesize(self):
        '''
        Returns the feature size (i.e. vocab_size)
        '''
        return self.featuresize
    def getfeatures(self, features):
        '''
        Returns the Temporal Bag-of-Super-Features representation
        for the feature matrix [frames x features]
        '''
        # Normalize features
        norm_feat = self.norm.normalize(features)
        # Append temporal information
        temp_feat = self.__augment_temporal(norm_feat)
        # Get probabilities
        probas = self.gmm.predict_proba(temp_feat)
        # Compute mean for BoF as frequencies
        bof_hist = np.mean(probas, axis=0)
        return bof_hist
class BoFGMMBase(object):
    '''
    Abstract base class for the GMM based models.
    Used by the Pyramid and supervised codebook model.
    Trains one GMM per class and merges them into one "super codebook" GMM.
    NOTE(review): Python 2 code (`print >>`, `iteritems`); integer divisions
    below rely on Python 2 semantics.
    '''
    def __init__(self, vocab_size):
        '''
        Initialization
        @param vocab_size: total size of the codebook over all classes
        '''
        self.vocab_size = vocab_size
        self.normalize = True
        self.norm = FeatureNormalizer()
        self.gmm = None  # super codebook, built in compute_super_codebook
        return
    def compute_super_codebook(self, feature_size):
        '''
        Merges the GMMs that were computed for each class
        into one GMM (super codebook).
        @param feature_size: dimensionality of a feature vector
        '''
        # Get number of classes
        ncl = len(self.labels)
        # Compute vocabsize per class (Python 2 integer division)
        vocab_size_per_cl = max(1, self.vocab_size / ncl)
        # Create GMM for overall repr
        print >> sys.stderr, 'Using GMM with', self.vocab_size, 'and', vocab_size_per_cl , 'per class.'
        self.gmm = GMM(self.vocab_size, n_iter=1, params='w', covariance_type='diag')
        # Init by fitting with ones (workaround to pre-allocate the arrays)
        self.gmm.fit(np.ones((self.vocab_size, feature_size)))
        # Overwrite values from supervised codebook GMM by class GMMs
        index = 0
        for _, sgmm in self.gmms.iteritems():
            vocab_size_per_cl = len(sgmm.means_)
            self.gmm.means_ [index:index + vocab_size_per_cl, :] = sgmm.means_
            self.gmm.covars_ [index:index + vocab_size_per_cl, :] = sgmm.covars_
            index += vocab_size_per_cl
        # Set uniform GMM weights
        self.gmm.weights_ = np.ones(self.vocab_size) / float(self.vocab_size)
        return
    def train(self, datadict, labels, rand_features=True):
        '''
        Trains a scipy GMM for each class, joins them into a super codebook.
        @param datadict: Dictionary of class labels.
                         Inside each label there is a list of feature matrices for each window [frames x feature]
        @param labels: the labels of the datadict in a given order
        @param rand_features: Shuffles the samples before running the GMM
        '''
        # NOTE(review): self.criterion is initialized but never used here.
        self.criterion = []
        # Stack the features
        allfeatures = np.vstack(list([np.vstack(x) for x in datadict.values()]))
        # Determine the normalisation statistics and remember them
        self.norm = FeatureNormalizer()
        self.norm.setup(allfeatures)
        # Get number of classes
        ncl = len(labels)
        # Compute vocabsize per class (Python 2 integer division)
        vocab_size_per_cl = max(1, self.vocab_size / ncl)
        # Update vocabsize to account for rounding errors
        self.vocab_size = vocab_size_per_cl * ncl
        #
        # Train GMMs for each class
        #
        self.gmms = {}
        self.labels = labels
        for label in labels:
            # Compute feature representations
            feats = np.vstack(datadict[label])
            if rand_features:
                np.random.shuffle(feats)
            if self.normalize:
                norm_features = self.norm.normalize(feats)
            else:
                norm_features = (feats)
            print >> sys.stderr, ("Training a GMM for label %s, using scipy and data of shape %s"
                                  % (label, str(np.shape(norm_features))))
            # Train the gmm
            sub_gmm = GMM(vocab_size_per_cl, covariance_type='diag', n_iter=100)
            sub_gmm.fit(norm_features)
            # Set GMM for class
            self.gmms[label] = sub_gmm
        #
        # Combine GMMs to super codebook
        #
        self.compute_super_codebook(allfeatures.shape[1])
        return
    def classify_proba(self, features):
        '''
        Returns the GMM predictions for the features of each component
        '''
        return self.gmm.predict_proba(features)
    def score_samples(self, features):
        '''
        Return the GMM scores for the features
        '''
        return self.gmm.score_samples(features)
class BoFModelSuper(BoFGMMBase):
    '''
    Bag-of-Features model on a class-wise (supervised) GMM codebook.
    A window is encoded as the mean of its per-frame Gaussian posteriors.
    '''
    def __init__(self, vocab_size=100):
        '''
        Creates the model with the requested total codebook size.
        '''
        super(BoFModelSuper, self).__init__(vocab_size)
        self.featuresize = vocab_size
        return
    def train(self, datadict, labels):
        '''
        Fits the class-wise codebooks (with shuffled samples) and records
        the resulting feature dimensionality.
        '''
        super(BoFModelSuper, self).train(datadict, labels, rand_features=True)
        self.featuresize = self.vocab_size
        return
    def getfeaturesize(self):
        '''
        Length of the produced histogram (equals the vocabulary size).
        '''
        return self.featuresize
    def getfeatures(self, features):
        '''
        Soft-quantizes the frame matrix [frames x features] against the
        super codebook and mean-pools the posteriors into one histogram.
        '''
        normalized = self.norm.normalize(features)
        posteriors = self.gmm.predict_proba(normalized)
        return np.mean(posteriors, axis=0)
class BoFModelPyramid(BoFGMMBase):
    '''
    Temporal-pyramid Bag-of-Features on a supervised GMM codebook.
    Posteriors are mean-pooled per tile; the top level is a max pooling
    over the base tiles.
    '''
    def __init__(self, vocab_size=100, tiles=2):
        '''
        @param vocab_size: size of the overall vocabulary
        @param tiles: number of tiles on the base level of the pyramid
        '''
        super(BoFModelPyramid, self).__init__(vocab_size)
        self.tiles = tiles
        return
    def _pyramid2(self, features):
        '''
        Two-tile pyramid, concatenated as [max(left, right), left, right].
        '''
        # A single frame cannot be halved; duplicate it so both tiles exist.
        if len(features) < 2:
            features = np.vstack((features, features))
        half = int(len(features) / 2)
        left = np.mean(self.gmm.predict_proba(features[:half]), axis=0)
        right = np.mean(self.gmm.predict_proba(features[half:]), axis=0)
        top = np.max((left, right), axis=0)
        return np.hstack((top, left, right))
    def _pyramidn(self, features, num_tiles):
        '''
        n-tile pyramid, concatenated as [tile_0, ..., tile_{n-1}, top],
        where top is the element-wise max over all base tiles.
        '''
        # Too few frames for the requested tiling: repeat the whole window.
        if len(features) < num_tiles:
            features = np.vstack([features] * num_tiles)
        step = len(features) / float(num_tiles)
        tiles = [np.mean(self.gmm.predict_proba(
                     features[int(i * step):int((i + 1) * step)]), axis=0)
                 for i in range(num_tiles)]
        # Max pooling over the base tiles yields the top level.
        tiles.append(np.max(np.vstack(tiles), axis=0))
        return np.hstack(tiles)
    def train(self, datadict, labels):
        '''
        Fits the class-wise codebooks (with shuffled samples) and records
        the base codebook size.
        '''
        super(BoFModelPyramid, self).train(datadict, labels, rand_features=True)
        self.featuresize = self.vocab_size
        return
    def getfeaturesize(self):
        '''
        Histogram length: one vocab_size slot per tile plus the top level.
        '''
        return (self.tiles + 1) * self.vocab_size
    def getfeatures(self, features):
        '''
        Computes the pyramid representation for [frames x features].
        Raises RuntimeError when fewer than two tiles are configured.
        '''
        normalized = self.norm.normalize(features)
        if self.tiles == 2:
            return self._pyramid2(normalized)
        elif self.tiles > 2:
            return self._pyramidn(normalized, self.tiles)
        raise RuntimeError("No Pyramid level specified")
def get_model(typename, cbsize, num_tiles):
    '''
    Factory: builds a Bag-of-Features model by name.
    @param typename: one of
        'super', 'pyramid' -- cf.
            A Bag-of-Features Approach to Acoustic Event Detection
            Axel Plinge, Rene Grzeszick, Gernot A. Fink.
            Int. Conf. on Acoustics, Speech and Signal Processing, 2014.
        'temporal' -- cf.
            Temporal Acoustic Words for Online Acoustic Event Detection
            Rene Grzeszick, Axel Plinge, Gernot A. Fink.
            German Conf. Pattern Recognition, Aachen, Germany, 2015.
    @param cbsize: total codebook size
    @param num_tiles: number of temporal subdivisions
    @return: the model instance, or None for an unknown typename
    '''
    if typename == 'super':
        return BoFModelSuper(cbsize)
    elif typename == 'temporal':
        # The temporal model encodes "tiles" as a negative frame count.
        return BoFModelTemporal(cbsize, -num_tiles)
    elif typename == 'pyramid':
        return BoFModelPyramid(cbsize, num_tiles)
    return None
| 34.250569
| 117
| 0.607475
|
4a063a5d09a3e13b3e0317005b170e3ab665d18f
| 4,620
|
py
|
Python
|
previous_version/make_affinity_labels.py
|
CVI-SZU/CLIMS
|
9d3d0123b625b2c6941069e8fb359019a5cabd59
|
[
"MIT"
] | 4
|
2022-03-19T06:50:32.000Z
|
2022-03-26T03:25:51.000Z
|
previous_version/make_affinity_labels.py
|
CVI-SZU/CLIMS
|
9d3d0123b625b2c6941069e8fb359019a5cabd59
|
[
"MIT"
] | 1
|
2022-03-26T03:27:57.000Z
|
2022-03-30T07:00:43.000Z
|
previous_version/make_affinity_labels.py
|
CVI-SZU/CLIMS
|
9d3d0123b625b2c6941069e8fb359019a5cabd59
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from core.puzzle_utils import *
from core.networks import *
from core.datasets import *
from tools.general.io_utils import *
from tools.general.time_utils import *
from tools.general.json_utils import *
from tools.ai.log_utils import *
from tools.ai.demo_utils import *
from tools.ai.optim_utils import *
from tools.ai.torch_utils import *
from tools.ai.evaluate_utils import *
from tools.ai.augment_utils import *
from tools.ai.randaugment import *
# Command-line interface for converting CAM predictions into affinity labels.
parser = argparse.ArgumentParser()
###############################################################################
# Dataset
###############################################################################
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--num_workers', default=4, type=int)
# Root of the Pascal VOC 2012 dataset (overridden per host in __main__).
parser.add_argument('--data_dir', default='/data1/xjheng/dataset/VOC2012/', type=str)
###############################################################################
# Inference parameters
###############################################################################
# Name of the experiment whose CAM predictions (.npy) are read.
parser.add_argument('--experiment_name', default='resnet50@seed=0@bs=16@ep=5@nesterov@train@scale=0.5,1.0,1.5,2.0', type=str)
parser.add_argument('--domain', default='train', type=str)
# CAM confidence thresholds used as the background padding value when
# deriving the confident foreground / background maps.
parser.add_argument('--fg_threshold', default=0.30, type=float)
parser.add_argument('--bg_threshold', default=0.05, type=float)
if __name__ == '__main__':
    ###################################################################################
    # Arguments
    ###################################################################################
    args = parser.parse_args()
    from iputils import get_host_ip
    ip = get_host_ip()
    # print(ip)
    # Pick the VOC2012 root for the specific machine we are running on.
    if ip == '172.31.234.159':
        args.data_dir = '/data1/xjheng/dataset/VOC2012/'
    elif ip == '172.31.111.180':
        args.data_dir = '/home/lzr/data/VOC/VOC2012/'
    else:
        raise NotImplementedError
    experiment_name = args.experiment_name
    # pred_dir holds the input .npy CAMs; aff_dir receives the PNG pseudo-labels.
    pred_dir = f'./experiments/predictions/{experiment_name}/'
    aff_dir = create_directory('./experiments/predictions/{}@aff_fg={:.2f}_bg={:.2f}/'.format(experiment_name, args.fg_threshold, args.bg_threshold))
    set_seed(args.seed)
    log_func = lambda string='': print(string)  # NOTE(review): assigned but never used in this block
    ###################################################################################
    # Transform, Dataset, DataLoader
    ###################################################################################
    # for mIoU
    meta_dic = read_json('./data/VOC_2012.json')
    dataset = VOC_Dataset_For_Making_CAM(args.data_dir, args.domain)
    #################################################################################################
    # Convert
    #################################################################################################
    eval_timer = Timer()
    length = len(dataset)
    for step, (ori_image, image_id, _, _) in enumerate(dataset):
        png_path = aff_dir + image_id + '.png'
        # Resumable: skip images whose pseudo-label has already been written.
        if os.path.isfile(png_path):
            continue
        # load
        image = np.asarray(ori_image)
        cam_dict = np.load(pred_dir + image_id + '.npy', allow_pickle=True).item()
        ori_h, ori_w, c = image.shape
        # keys: class indices present in the image; hr_cam: per-class CAM planes
        # (presumably shaped (num_classes, H, W) — confirm against CAM writer).
        keys = cam_dict['keys']
        cams = cam_dict['hr_cam']
        # 1. find confident fg & bg
        # Padding a constant plane in front of the CAM stack acts as a background
        # score, so argmax == 0 means "background at this threshold".
        fg_cam = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.fg_threshold)
        fg_cam = np.argmax(fg_cam, axis=0)
        fg_conf = keys[crf_inference_label(image, fg_cam, n_labels=keys.shape[0])]
        bg_cam = np.pad(cams, ((1, 0), (0, 0), (0, 0)), mode='constant', constant_values=args.bg_threshold)
        bg_cam = np.argmax(bg_cam, axis=0)
        bg_conf = keys[crf_inference_label(image, bg_cam, n_labels=keys.shape[0])]
        # 2. combine confident fg & bg
        # 255 = uncertain (fg pass says background); 0 = confident background
        # (both passes agree on background); other values = confident class labels.
        conf = fg_conf.copy()
        conf[fg_conf == 0] = 255
        conf[bg_conf + fg_conf == 0] = 0
        imageio.imwrite(png_path, conf.astype(np.uint8))
        sys.stdout.write('\r# Convert [{}/{}] = {:.2f}%, ({}, {})'.format(step + 1, length, (step + 1) / length * 100, (ori_h, ori_w), conf.shape))
        sys.stdout.flush()
    print()
| 35.813953
| 149
| 0.540476
|
4a063b5b7c5fb44b10d0a8d15b61d1262e58eee6
| 3,728
|
py
|
Python
|
tests/test_binds.py
|
indietyp/flask-sqlalchemy
|
7ba8761f999bad18966a3e478a75d64c1ca11a7d
|
[
"BSD-3-Clause"
] | 1,917
|
2015-01-04T17:52:59.000Z
|
2019-02-01T13:44:53.000Z
|
tests/test_binds.py
|
indietyp/flask-sqlalchemy
|
7ba8761f999bad18966a3e478a75d64c1ca11a7d
|
[
"BSD-3-Clause"
] | 461
|
2015-01-02T09:38:33.000Z
|
2019-01-31T14:47:50.000Z
|
tests/test_binds.py
|
indietyp/flask-sqlalchemy
|
7ba8761f999bad18966a3e478a75d64c1ca11a7d
|
[
"BSD-3-Clause"
] | 590
|
2015-01-02T09:09:40.000Z
|
2019-02-01T07:19:37.000Z
|
from flask_sqlalchemy import get_state
from flask_sqlalchemy import SQLAlchemy
def test_basic_binds(app, db):
    """Models with __bind_key__ route to their own engines; explicit
    __table_args__ info wins for Foo; unbound Baz uses the default engine."""
    app.config["SQLALCHEMY_BINDS"] = {"foo": "sqlite://", "bar": "sqlite://"}

    class Foo(db.Model):
        __bind_key__ = "foo"
        __table_args__ = {"info": {"bind_key": "foo"}}
        id = db.Column(db.Integer, primary_key=True)

    class Bar(db.Model):
        __bind_key__ = "bar"
        id = db.Column(db.Integer, primary_key=True)

    class Baz(db.Model):
        id = db.Column(db.Integer, primary_key=True)

    db.create_all()

    # the default (None) bind resolves to the primary engine
    assert db.get_engine(app, None) == db.engine

    # each named bind resolves through its cached connector to its own URL
    for bind_name in ("foo", "bar"):
        eng = db.get_engine(app, bind_name)
        conn = app.extensions["sqlalchemy"].connectors[bind_name]
        assert eng == conn.get_engine()
        assert str(eng.url) == app.config["SQLALCHEMY_BINDS"][bind_name]

    # table metadata carries the bind key (or lacks one for the default bind)
    assert db.metadata.tables["foo"].info["bind_key"] == "foo"
    assert db.metadata.tables["bar"].info["bind_key"] == "bar"
    assert db.metadata.tables["baz"].info.get("bind_key") is None

    # each engine got exactly one table created in it
    for bind_name, table_name in (("foo", "foo"), ("bar", "bar"), (None, "baz")):
        md = db.MetaData()
        md.reflect(bind=db.get_engine(app, bind_name))
        assert len(md.tables) == 1
        assert table_name in md.tables

    # the session's bind map pairs every table with its engine
    assert db.get_binds(app) == {
        Foo.__table__: db.get_engine(app, "foo"),
        Bar.__table__: db.get_engine(app, "bar"),
        Baz.__table__: db.get_engine(app, None),
    }
def test_abstract_binds(app, db):
    """A __bind_key__ declared on an abstract base is inherited by subclasses."""
    app.config["SQLALCHEMY_BINDS"] = {"foo": "sqlite://"}

    class AbstractFooBoundModel(db.Model):
        __abstract__ = True
        __bind_key__ = "foo"

    class FooBoundModel(AbstractFooBoundModel):
        id = db.Column(db.Integer, primary_key=True)

    db.create_all()

    # the concrete subclass's table inherited the bind key
    assert db.metadata.tables["foo_bound_model"].info["bind_key"] == "foo"

    # and the table was created in the "foo" engine only
    md = db.MetaData()
    md.reflect(bind=db.get_engine(app, "foo"))
    assert len(md.tables) == 1
    assert "foo_bound_model" in md.tables
def test_connector_cache(app):
    """get_engine() caches a connector bound to the active application."""
    database = SQLAlchemy()
    database.init_app(app)
    with app.app_context():
        database.get_engine()
        cached = get_state(app).connectors[None]
        assert cached._app is app
def test_polymorphic_bind(app, db):
    """__bind_key__ on a polymorphic base propagates to its subclasses."""
    bind_key = "polymorphic_bind_key"
    # NOTE(review): "sqlite:///:memory" lacks the trailing colon of the
    # canonical ":memory:" URI — reproduced verbatim; confirm intent upstream.
    app.config["SQLALCHEMY_BINDS"] = {bind_key: "sqlite:///:memory"}

    class Base(db.Model):
        __bind_key__ = bind_key
        __tablename__ = "base"
        id = db.Column(db.Integer, primary_key=True)
        p_type = db.Column(db.String(50))
        __mapper_args__ = {"polymorphic_on": p_type, "polymorphic_identity": "base"}

    class Child1(Base):
        child_1_data = db.Column(db.String(50))
        __mapper_args__ = {"polymorphic_identity": "child_1"}

    # base and child share the same table, hence the same bind key
    for model in (Base, Child1):
        assert model.__table__.info["bind_key"] == bind_key
def test_execute_with_binds_arguments(app, db):
    """session.execute accepts an explicit engine via bind_arguments."""
    app.config["SQLALCHEMY_BINDS"] = {"foo": "sqlite://", "bar": "sqlite://"}
    db.create_all()
    foo_engine = db.get_engine(app, "foo")
    db.session.execute("SELECT true", bind_arguments={"bind": foo_engine})
| 29.354331
| 84
| 0.646996
|
4a063b61c06693c64ddb60557fd0d55e8be57e22
| 3,120
|
py
|
Python
|
harvester/fetcher/cmis_atom_feed_fetcher.py
|
amywieliczka/harvester
|
ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8
|
[
"BSD-3-Clause"
] | 5
|
2015-01-14T20:48:28.000Z
|
2015-05-13T15:31:12.000Z
|
harvester/fetcher/cmis_atom_feed_fetcher.py
|
amywieliczka/harvester
|
ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8
|
[
"BSD-3-Clause"
] | 87
|
2015-01-09T00:17:44.000Z
|
2021-12-13T19:37:44.000Z
|
harvester/fetcher/cmis_atom_feed_fetcher.py
|
amywieliczka/harvester
|
ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8
|
[
"BSD-3-Clause"
] | 4
|
2015-02-26T23:27:44.000Z
|
2019-06-11T21:43:17.000Z
|
# -*- coding: utf-8 -*-
import requests
from requests.auth import HTTPBasicAuth
from xml.etree import ElementTree as ET
from xmljson import badgerfish
from .fetcher import Fetcher
class CMISAtomFeedFetcher(Fetcher):
    '''harvest a CMIS Atom Feed. Don't know how generic this is, just working
    with Oakland Public Library Preservica implementation.
    Right now this uses the "descendants" page for collections, this gets all
    the data for one collection from one http request then parses the resulting
    data. This might not work if we get collections much bigger than the
    current ones (~1000 objects max)
    '''
    def __init__(self, url_harvest, extra_data, **kwargs):
        '''Fetch the feed once and pre-parse every Atom entry.

        :param url_harvest: URL of the CMIS Atom "descendants" feed.
        :param extra_data: comma-separated "username,password" pair used for
            HTTP Basic auth; surrounding whitespace is stripped.
        '''
        super(CMISAtomFeedFetcher, self).__init__(url_harvest, extra_data)
        # parse extra data for username,password
        uname, pswd = extra_data.split(',')
        resp = requests.get(url_harvest,
                            auth=HTTPBasicAuth(uname.strip(), pswd.strip()))
        self.tree = ET.fromstring(resp.content)
        # Convert each nested Atom <entry> under the CMIS children element
        # into a badgerfish-style dict so downstream code gets plain data.
        self.objects = [
            badgerfish.data(x)
            for x in self.tree.findall('./{http://www.w3.org/2005/Atom}'
                                       'entry/{http://docs.oasis-open.org/'
                                       'ns/cmis/restatom/200908/}children//'
                                       '{http://www.w3.org/2005/Atom}entry')
        ]
        self.objects_iter = iter(self.objects)

    def next(self):
        '''Return the next pre-parsed object; raises StopIteration when done.'''
        # Fixed: iterator.next() is Python-2 only; the next() builtin works
        # on both Python 2.6+ and Python 3.
        return next(self.objects_iter)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| 49.52381
| 79
| 0.707692
|
4a063bf10034c89d16ab77e6291a5de96a2693b8
| 2,651
|
py
|
Python
|
Tools/peg_generator/scripts/benchmark.py
|
oleksandr-pavlyk/cpython
|
eb002dbe0da9622245a355db5f0cd5aa2fc70b40
|
[
"0BSD"
] | 52,316
|
2015-01-01T15:56:25.000Z
|
2022-03-31T23:19:01.000Z
|
Tools/peg_generator/scripts/benchmark.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 25,286
|
2015-03-03T23:18:02.000Z
|
2022-03-31T23:17:27.000Z
|
Tools/peg_generator/scripts/benchmark.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 31,623
|
2015-01-01T13:29:37.000Z
|
2022-03-31T19:55:06.000Z
|
#!/usr/bin/env python3
import argparse
import ast
import sys
import os
from time import time
# memory_profiler is a benchmarking dependency installed by `make venv`;
# fail early with a helpful message instead of a bare ImportError.
try:
    import memory_profiler
except ModuleNotFoundError:
    print(
        "Please run `make venv` to create a virtual environment and install"
        " all the dependencies, before running this script."
    )
    sys.exit(1)
# Make the sibling `scripts` package importable when run from this directory.
sys.path.insert(0, os.getcwd())
from scripts.test_parse_directory import parse_directory
argparser = argparse.ArgumentParser(
    prog="benchmark", description="Reproduce the various pegen benchmarks"
)
argparser.add_argument(
    "--target",
    action="store",
    choices=["xxl", "stdlib"],
    default="xxl",
    help="Which target to use for the benchmark (default is xxl.py)",
)
# Two subcommands: "compile" (parse + bytecode) and "parse" (AST only).
subcommands = argparser.add_subparsers(title="Benchmarks", dest="subcommand")
command_compile = subcommands.add_parser(
    "compile", help="Benchmark parsing and compiling to bytecode"
)
command_parse = subcommands.add_parser("parse", help="Benchmark parsing and generating an ast.AST")
def benchmark(func):
    """Decorator: time *func* over 3 runs and report average time + peak memory.

    Prints the wrapped function's name, the mean wall time of three runs,
    and the peak memory of one additional profiled run, then returns the
    last timed call's result.
    """
    from functools import wraps  # local import keeps module-level deps unchanged

    @wraps(func)  # preserve the wrapped function's __name__/__doc__
    def wrapper(*args):
        times = []
        for _ in range(3):
            start = time()
            result = func(*args)
            end = time()
            times.append(end - start)
        # NOTE: memory_usage invokes func once more and samples that single
        # run; it is not an average over the three timed runs.
        memory = memory_profiler.memory_usage((func, args))
        print(f"{func.__name__}")
        print(f"\tTime: {sum(times)/3:.3f} seconds on an average of 3 runs")
        print(f"\tMemory: {max(memory)} MiB on an average of 3 runs")
        return result
    return wrapper
@benchmark
def time_compile(source):
    """Benchmark compiling *source* to bytecode (includes parsing)."""
    return compile(source, "<string>", "exec")
@benchmark
def time_parse(source):
    """Benchmark parsing *source* into an ast.AST without compiling."""
    return ast.parse(source)
def run_benchmark_xxl(subcommand, source):
    """Run the chosen benchmark ("compile" or "parse") on the xxl.py source."""
    dispatch = {"compile": time_compile, "parse": time_parse}
    runner = dispatch.get(subcommand)
    if runner is not None:
        runner(source)
def run_benchmark_stdlib(subcommand):
    """Parse (mode 1) or compile (mode 2) the whole stdlib, three times."""
    mode_by_subcommand = {"compile": 2, "parse": 1}
    for _ in range(3):
        parse_directory(
            "../../Lib",
            verbose=False,
            excluded_files=["*/bad*", "*/lib2to3/tests/data/*"],
            short=True,
            mode=mode_by_subcommand[subcommand],
        )
def main():
    """Entry point: validate arguments and dispatch the selected benchmark."""
    args = argparser.parse_args()
    if args.subcommand is None:
        argparser.error("A benchmark to run is required")
    if args.target == "stdlib":
        run_benchmark_stdlib(args.subcommand)
    elif args.target == "xxl":
        with open(os.path.join("data", "xxl.py"), "r") as f:
            xxl_source = f.read()
        run_benchmark_xxl(args.subcommand, xxl_source)
if __name__ == "__main__":
main()
| 25.009434
| 99
| 0.626179
|
4a063c9d6d772559f0158b344950a4d8e6837df9
| 185,766
|
py
|
Python
|
Application/WorkflowDeveloper.py
|
AO-StreetArt/AOWorkflowDeveloper
|
ac041b46a9cc9c346c46d2c061f9588c21bf9948
|
[
"MIT"
] | null | null | null |
Application/WorkflowDeveloper.py
|
AO-StreetArt/AOWorkflowDeveloper
|
ac041b46a9cc9c346c46d2c061f9588c21bf9948
|
[
"MIT"
] | null | null | null |
Application/WorkflowDeveloper.py
|
AO-StreetArt/AOWorkflowDeveloper
|
ac041b46a9cc9c346c46d2c061f9588c21bf9948
|
[
"MIT"
] | null | null | null |
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.checkbox import CheckBox
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.properties import ListProperty, StringProperty, BooleanProperty, ObjectProperty, NumericProperty
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.actionbar import ActionBar, ActionView, ActionButton, ActionGroup
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.popup import Popup
from kivy.uix.spinner import Spinner
from kivy.uix.scrollview import ScrollView
from kivy.clock import Clock
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
from src.KeyActionCarouselItem import KeyActionCarouselItem
from src.KeyActionPopup import KeyActionPopup
from src.WFCarouselItem import WFCarouselItem
from src.flowcharts.Connector import Connector
from src.flowcharts.DragGrid import DragGrid, DragGridCell
from src.flowcharts.FlowChartNode2 import FlowChartNode, DraggableImage
from src.flowcharts.DraggableOption import DraggableOption
from src.dbadmin.Translator import CSVTranslator, ExcelTranslator, ExternalDBTranslator, Translator
from src.dbadmin.DataBuffer import DataBuffer
from src.dbadmin.Writer import CSVWriter, ExcelWriter, TerminalWriter
from src.dbadmin.DataStream import DataStream
from src.export.ExcelExporter import TemplateReader
import os
import os.path
import platform
import xml.etree.ElementTree as ET
#------------------------------------------------------------
#----------------Configuration File Read---------------------
#------------------------------------------------------------
#Read the XML Config File
_config_path = os.path.abspath('../Configuration/config.xml')
tree = ET.parse(_config_path)
root = tree.getroot()
name=''
value=''
# Defaults, used when the config file does not override them.
engine_path = 'sqlite:///test.db'
engine_name = 'test.db'
sqlite_logging="debug"
popup_filter_limit=10
for child in root:
    #Allow for the XML File to be split into segments
    for param in child:
        name=''
        value=''
        # Each param element is expected to contain <Name> and <Value> children.
        for val in param:
            if val.tag == 'Name':
                name=val.text
            elif val.tag == 'Value':
                value=val.text
        #Find the name specified and assign the appropriate environment variable
        if name != '' and value != '':
            if name == 'EnginePath':
                engine_path=value
            elif name == 'EngineName':
                engine_name=value
            elif name == 'SQLiteLogging':
                sqlite_logging=value
            elif name == 'PopupFilterLimit':
                # Values arrive as text; tolerate "10.0"-style numbers.
                popup_filter_limit=int(float(value))
#------------------------------------------------------------
#----------------ORM-----------------------------------------
#------------------------------------------------------------
#Instantiate the Declarative Base Class
Base = declarative_base()
Logger.info('SQLAlchemy: Declaritive Base Instantiated')
#Store the base level key action
class KeyAction(Base):
    """ORM model for a key action: the smallest executable test step."""
    __tablename__ = 'keyaction'
    id = Column(Integer, primary_key=True)
    systemareaid = Column(Integer, ForeignKey('systemarea.id'))
    name = Column(String)
    description = Column(String)
    custom = Column(Boolean)
    sys = relationship("SystemArea", backref=backref('keyaction', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_", so Python never called it.
        return "<Key Action: ID = '%s', System Area ID = '%s', Name = '%s', Description = '%s', Custom = '%s'>" % (self.id, self.systemareaid, self.name, self.description, self.custom)
class KeyActionImport(Base):
    """Staging link between a KeyAction row and an import batch."""
    __tablename__ = 'keyaction_import'
    id = Column(Integer, primary_key=True)
    keyactionid = Column(Integer, ForeignKey('keyaction.id'))
    importid = Column(Integer)
    act = relationship("KeyAction", backref=backref('keyaction_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Key Action: ID = '%s'>" % (self.id)
#Store the base level system area
class SystemArea(Base):
    """ORM model for a system area; belongs to a Module."""
    __tablename__ = 'systemarea'
    id = Column(Integer, primary_key=True)
    moduleid = Column(Integer, ForeignKey('module.id'))
    name = Column(String)
    mod = relationship("Module", backref=backref('systemarea', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<System Area: ID = '%s', Module ID = '%s', Name = '%s'>" % (self.id, self.moduleid, self.name)
class SystemAreaImport(Base):
    """Staging link between a SystemArea row and an import batch."""
    __tablename__ = 'systemarea_import'
    id = Column(Integer, primary_key=True)
    systemareaid = Column(Integer, ForeignKey('systemarea.id'))
    importid = Column(Integer)
    act = relationship("SystemArea", backref=backref('systemarea_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<System Area: ID = '%s'>" % (self.id)
#Store the base level module
class Module(Base):
    """ORM model for a module; belongs to a Product."""
    __tablename__ = 'module'
    id = Column(Integer, primary_key=True)
    productid = Column(Integer, ForeignKey('product.id'))
    name = Column(String)
    mod = relationship("Product", backref=backref('module', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Module: ID = '%s', Name = '%s', Product = %s>" % (self.id, self.name, self.productid)
class ModuleImport(Base):
    """Staging link between a Module row and an import batch."""
    __tablename__ = 'module_import'
    id = Column(Integer, primary_key=True)
    moduleid = Column(Integer, ForeignKey('module.id'))
    importid = Column(Integer)
    act = relationship("Module", backref=backref('module_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Module: ID = '%s'>" % (self.id)
#Store the base level product
class Product(Base):
    """ORM model for a top-level product."""
    __tablename__ = 'product'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Product: ID = '%s', Name = '%s'>" % (self.id, self.name)
class ProductImport(Base):
    """Staging link between a Product row and an import batch."""
    __tablename__ = 'product_import'
    id = Column(Integer, primary_key=True)
    productid = Column(Integer, ForeignKey('product.id'))
    importid = Column(Integer)
    act = relationship("Product", backref=backref('product_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Product: ID = '%s'>" % (self.id)
#Store the base level input parameter
class InputParameter(Base):
    """ORM model for an input parameter belonging to a KeyAction."""
    __tablename__ = 'inputparameter'
    id = Column(Integer, primary_key=True)
    keyactionid = Column(Integer, ForeignKey('keyaction.id'))
    name = Column(String)
    act = relationship("KeyAction", backref=backref('inputparameter', order_by=id), cascade="all, delete, delete-orphan", single_parent=True)
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Input Parameter: ID = '%s', Key Action ID = '%s', Name = '%s'>" % (self.id, self.keyactionid, self.name)
class InputParameterImport(Base):
    """Staging link between an InputParameter row and an import batch."""
    __tablename__ = 'inputparameter_import'
    id = Column(Integer, primary_key=True)
    inputparameterid = Column(Integer, ForeignKey('inputparameter.id'))
    importid = Column(Integer)
    act = relationship("InputParameter", backref=backref('inputparameter_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Input Parameter: ID = '%s'>" % (self.id)
#Store the base level client
class Client(Base):
    """ORM model for a client."""
    __tablename__ = 'client'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Client: ID = '%s', Name = '%s'>" % (self.id, self.name)
class ClientImport(Base):
    """Staging link between a Client row and an import batch."""
    __tablename__ = 'client_import'
    id = Column(Integer, primary_key=True)
    clientid = Column(Integer, ForeignKey('client.id'))
    importid = Column(Integer)
    act = relationship("Client", backref=backref('client_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Client: ID = '%s'>" % (self.id)
#Store the base level project
class Project(Base):
    """ORM model for a project; belongs to a Client."""
    __tablename__ = 'project'
    id = Column(Integer, primary_key=True)
    clientid = Column(Integer, ForeignKey('client.id'))
    name = Column(String)
    mod = relationship("Client", backref=backref('project', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Project: ID = '%s', Client ID = '%s', Name = '%s'>" % (self.id, self.clientid, self.name)
class ProjectImport(Base):
    """Staging link between a Project row and an import batch."""
    __tablename__ = 'project_import'
    id = Column(Integer, primary_key=True)
    projectid = Column(Integer, ForeignKey('project.id'))
    importid = Column(Integer)
    act = relationship("Project", backref=backref('project_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Project: ID = '%s'>" % (self.id)
#Store the base level system area
class TestScript(Base):
    """ORM model for a test script; belongs to a Project."""
    __tablename__ = 'testscript'
    id = Column(Integer, primary_key=True)
    projectid = Column(Integer, ForeignKey('project.id'))
    name = Column(String)
    mod = relationship("Project", backref=backref('testscript', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Test Script: ID = '%s', Project ID = '%s', Name = '%s'>" % (self.id, self.projectid, self.name)
class TestScriptImport(Base):
    """Staging link between a TestScript row and an import batch."""
    __tablename__ = 'testscript_import'
    id = Column(Integer, primary_key=True)
    testscriptid = Column(Integer, ForeignKey('testscript.id'))
    importid = Column(Integer)
    act = relationship("TestScript", backref=backref('testscript_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Test Script: ID = '%s'>" % (self.id)
#Store the base level system area
class Workflow(Base):
    """ORM model for a workflow; belongs to a TestScript."""
    __tablename__ = 'workflow'
    id = Column(Integer, primary_key=True)
    testscriptid = Column(Integer, ForeignKey('testscript.id'))
    name = Column(String)
    mod = relationship("TestScript", backref=backref('workflow', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_", and the copy-pasted body referenced
        # self.moduleid (which Workflow has no attribute for) and the wrong
        # label — it would have raised AttributeError if ever called.
        return "<Workflow: ID = '%s', Test Script ID = '%s', Name = '%s'>" % (self.id, self.testscriptid, self.name)
class WorkflowImport(Base):
    """Staging link between a Workflow row and an import batch."""
    __tablename__ = 'workflow_import'
    id = Column(Integer, primary_key=True)
    workflowid = Column(Integer, ForeignKey('workflow.id'))
    importid = Column(Integer)
    act = relationship("Workflow", backref=backref('workflow_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Workflow: ID = '%s'>" % (self.id)
class WorkflowAction(Base):
    """A KeyAction placed into a Workflow, with per-use result/notes/fail flag."""
    __tablename__ = 'workflowaction'
    id = Column(Integer, primary_key=True)
    keyactionid = Column(Integer, ForeignKey('keyaction.id'))
    workflowid = Column(Integer, ForeignKey('workflow.id'))
    expectedresult = Column(String)
    notes = Column(String)
    fail = Column(Boolean)
    ka = relationship("KeyAction", backref=backref('workflowaction', order_by=id), single_parent=True)
    wf = relationship("Workflow", backref=backref('workflowaction', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Workflow Action: ID = '%s', Key Action ID = '%s', Expected Results = '%s', Notes = '%s', Fail = '%s'>" % (self.id, self.keyactionid, self.expectedresult, self.notes, self.fail)
class WorkflowActionImport(Base):
    """Staging link between a WorkflowAction row and an import batch."""
    __tablename__ = 'workflowaction_import'
    id = Column(Integer, primary_key=True)
    workflowactionid = Column(Integer, ForeignKey('workflowaction.id'))
    importid = Column(Integer)
    act = relationship("WorkflowAction", backref=backref('workflowaction_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Workflow Action: ID = '%s'>" % (self.id)
class WorkflowNextAction(Base):
    """Edge in the workflow graph: which action follows a given workflow action."""
    __tablename__ = 'workflownextaction'
    id = Column(Integer, primary_key=True)
    keyactionid = Column(Integer, ForeignKey('workflowaction.id'))
    nextactionid = Column(Integer)
    act = relationship("WorkflowAction", backref=backref('workflownextaction', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Workflow Next Action: ID = '%s', Key Action ID = '%s', Next Action ID = '%s'>" % (self.id, self.keyactionid, self.nextactionid)
class WorkflowNextActionImport(Base):
    """Staging link between a WorkflowNextAction row and an import batch."""
    __tablename__ = 'workflownextaction_import'
    id = Column(Integer, primary_key=True)
    workflownextactionid = Column(Integer, ForeignKey('workflownextaction.id'))
    importid = Column(Integer)
    act = relationship("WorkflowNextAction", backref=backref('workflownextaction_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Workflow Next Action: ID = '%s'>" % (self.id)
class WorkflowParameter(Base):
    """Concrete value bound to an InputParameter for one workflow action."""
    __tablename__ = 'workflowparam'
    id = Column(Integer, primary_key=True)
    inputparamid = Column(Integer, ForeignKey('inputparameter.id'))
    keyactionid = Column(Integer, ForeignKey('workflowaction.id'))
    value = Column(String)
    act = relationship("WorkflowAction", backref=backref('workflowparam', order_by=id))
    ip = relationship("InputParameter", backref=backref('workflowparam', order_by=id), single_parent=True)
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Workflow Parameter: ID = '%s', Input Parameter ID = '%s', Key Action ID = '%s', Value = '%s'>" % (self.id, self.inputparamid, self.keyactionid, self.value)
class WorkflowParameterImport(Base):
    """Staging link between a WorkflowParameter row and an import batch."""
    __tablename__ = 'workflowparam_import'
    id = Column(Integer, primary_key=True)
    workflowparameterid = Column(Integer, ForeignKey('workflowparam.id'))
    importid = Column(Integer)
    act = relationship("WorkflowParameter", backref=backref('workflowparam_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Workflow Parameter: ID = '%s'>" % (self.id)
class FlowchartPosition(Base):
    """Grid position (row, col) of a workflow action on the flowchart canvas."""
    __tablename__ = 'flowchart'
    id = Column(Integer, primary_key=True)
    keyactionid = Column(Integer, ForeignKey('workflowaction.id'))
    row = Column(Integer)
    col = Column(Integer)
    act = relationship("WorkflowAction", backref=backref('flowchart', order_by=id))
class FlowchartPositionImport(Base):
    """Staging link between a FlowchartPosition row and an import batch."""
    __tablename__ = 'flowchart_import'
    id = Column(Integer, primary_key=True)
    flowchartpositionid = Column(Integer, ForeignKey('flowchart.id'))
    importid = Column(Integer)
    act = relationship("FlowchartPosition", backref=backref('flowchart_import', order_by=id))
    def __repr__(self):
        # Fixed: was misspelled "__repr_".
        return "<Flowchart: ID = '%s'>" % (self.id)
#------------------------------------------------------------
#----------------SQLAlchemy Connections----------------------
#------------------------------------------------------------
#Connect to the DB
#echo=True turns on query logging
#echo="debug" turns on query + result logging
#echo=False turns off query logging
# The original code branched on platform.system() == 'Windows', but both
# branches were byte-identical, so the branch has been collapsed.
# NOTE(review): sqlite_logging arrives from the XML config as a *string*;
# any non-empty value other than "debug" is simply truthy (e.g. "False"
# still enables logging) — confirm intended.
engine = create_engine(engine_path, echo=sqlite_logging)
Logger.info('SQLAlchemy: Engine Created')
# Create the schema only on first run, when the SQLite file does not exist yet.
if not os.path.exists(engine_name):
    Base.metadata.create_all(engine)
Logger.info('SQLAlchemy: Database Analyzed and Created if Necessary')
#Create the Session Factory
Session = sessionmaker(bind=engine)
session = Session()
Logger.info('SQLAlchemy: Session Created')
#------------------------------------------------------------
#----------------DB Seed Scripts-----------------------------
#------------------------------------------------------------
#Find if the seed data already exists
seed_products = session.query(Product).filter(Product.name=='Default').all()
seed_clients = session.query(Client).filter(Client.name=='Default').all()
seed_projects = session.query(Project).filter(Project.name=='Default').all()
seed_testscripts = session.query(TestScript).filter(TestScript.name=='Default').all()
seed_workflows = session.query(Workflow).filter(Workflow.name=='Default').all()
#Add the seed data
# Each level commits before the next is created so the freshly assigned
# primary key (e.g. seed_client.id) is available for the child row's FK.
if len(seed_products) == 0:
    seed_product = Product(name='Default')
    session.add(seed_product)
else:
    seed_product = seed_products[0]
if len(seed_clients) == 0:
    seed_client = Client(name='Default')
    session.add(seed_client)
else:
    seed_client = seed_clients[0]
session.commit()
if len(seed_projects) == 0:
    seed_project = Project(name='Default', clientid=seed_client.id)
    session.add(seed_project)
else:
    seed_project = seed_projects[0]
session.commit()
if len(seed_testscripts) == 0:
    seed_testscript = TestScript(name='Default', projectid=seed_project.id)
    session.add(seed_testscript)
else:
    seed_testscript = seed_testscripts[0]
session.commit()
if len(seed_workflows) == 0:
    seed_workflow = Workflow(name='Default', testscriptid=seed_testscript.id)
    session.add(seed_workflow)
else:
    seed_workflow = seed_workflows[0]
# NOTE(review): no commit follows the Workflow seed here — presumably a later
# commit elsewhere persists it; confirm.
#------------------------------------------------------------
#----------------Filter Manager------------------------------
#------------------------------------------------------------
class FilterManager():
#Class to manage filtering within the application
    def __init__(self):
        """Start on page 1 with a 5-row page and custom filtering disabled."""
        self.page = 1
        self.pageLength = 5
        self.customEnabled = False
        Logger.info('Filter: Filter Manager Created')
#Getters & Setters
    def GetCurrentPage(self):
        """Return the 1-based index of the current results page."""
        return self.page
    def GetPageLength(self):
        """Return the number of rows shown per page."""
        return self.pageLength
    def SetPageLength(self, newLength):
        """Set the number of rows shown per page."""
        self.pageLength = newLength
        Logger.debug('Filter: Page Length Set')
    def isCustomFilteringEnabled(self):
        """Return the custom-filtering flag (presumably restricts results to
        custom key actions — confirm against GetKeyActionResults)."""
        return self.customEnabled
    def setCustomFilteringEnabled(self, newcust):
        """Enable or disable the custom-filtering flag."""
        self.customEnabled = newcust
#Pagination
def NextPage_KA(self, module, sysarea, keyaction, custom, current_product):
Logger.debug('Filter: Next Page')
self.page = self.page + 1
limit = ((self.page - 1) * self.pageLength)
offset = self.pageLength + ((self.page - 1) * self.pageLength)
res = self.GetKeyActionResults(module, sysarea, keyaction, custom, limit, offset, current_product)
Logger.debug('Filter: Filter Applied')
if len(res) == 0:
self.page = 1
limit = ((self.page - 1) * self.pageLength)
offset = self.pageLength + ((self.page - 1) * self.pageLength)
return self.GetKeyActionResults(module, sysarea, keyaction, custom, limit, offset, current_product)
else:
return res
def PrevPage_KA(self, module, sysarea, keyaction, custom, current_product):
Logger.debug('Filter: Previous Page')
if self.page != 1:
self.page = self.page - 1
limit = ((self.page - 1) * self.pageLength)
offset = self.pageLength + ((self.page - 1) * self.pageLength)
return self.GetKeyActionResults(module, sysarea, keyaction, custom, limit, offset, current_product)
#Utility Method
    def FirstPage(self):
        """Reset pagination back to the first page."""
        self.page = 1
        Logger.debug('Filter: First Page')
#Filtering
def ApplyFilter(self, module, sysarea, keyaction, custom, current_product):
#Instantiate a session each time we need to connect to the DB
self.pageLength = 20
limit = ((self.page - 1) * self.pageLength)
offset = self.pageLength + ((self.page - 1) * self.pageLength)
Logger.debug('Filter: Key Action Filter Applied')
return self.GetKeyActionResults(module, sysarea, keyaction, custom, limit, offset, current_product)
def SimpleFilter(self):
limit = ((self.page - 1) * self.pageLength)
offset = self.pageLength + ((self.page - 1) * self.pageLength)
return session.query(KeyAction).order_by(KeyAction.id)[limit:offset]
def ApplyWorkflowFilter(self, workflow, module, sysarea, keyaction, custom):
#Instantiate a session each time we need to connect to the DB
self.pageLength = 10
limit = ((self.page - 1) * self.pageLength)
offset = self.pageLength + ((self.page - 1) * self.pageLength)
#Apply the filter
Logger.debug('Filter: Workflow Filter Applied')
return self.GetWorkflowResults(workflow, module, sysarea, keyaction, custom, limit, offset)
	def FindTestScripts(self, workflow, testscript, client, project, limit, offset):
		"""Return one page (slice [limit:offset]) of TestScript rows matching
		the supplied filters, ordered by TestScript.id.

		Blank filters ("" or None) are ignored; each combination of provided
		filters gets its own query branch.  Unlike FindWorkflows, names here
		are matched with exact equality rather than LIKE patterns.

		NOTE(review): the workflow argument is not used by any branch in this
		method -- confirm whether filtering test scripts by workflow was ever
		intended.
		"""
		#No filters provided: page through every test script
		if (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (project == "" or project is None) and (client == "" or client is None):
			results = session.query(TestScript).\
				order_by(TestScript.id)[limit:offset]
		#Only client provided
		elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (project == "" or project is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).order_by(TestScript.id)[limit:offset]
		#Only workflow provided -- NOTE(review): workflow is ignored here too
		elif (testscript == "" or testscript is None) and (project == "" or project is None) and (client == "" or client is None):
			results = session.query(TestScript).order_by(TestScript.id)[limit:offset]
		#Only project provided
		elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (client == "" or client is None):
			results = session.query(TestScript).join(Project).\
				filter(Project.name == project).order_by(TestScript.id)[limit:offset]
		#Only testscript provided
		elif (workflow == "" or workflow is None) and (project == "" or project is None) and (client == "" or client is None):
			results = session.query(TestScript).\
				filter(TestScript.name == testscript).order_by(TestScript.id)[limit:offset]
		elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).filter(Project.name == project).\
					order_by(TestScript.id)[limit:offset]
		elif (workflow == "" or workflow is None) and (project == "" or project is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).filter(TestScript.name == testscript).\
					order_by(TestScript.id)[limit:offset]
		elif (workflow == "" or workflow is None) and (client == "" or client is None):
			results = session.query(TestScript).join(Project).\
				filter(TestScript.name == testscript).filter(Project.name == project).\
					order_by(TestScript.id)[limit:offset]
		elif (testscript == "" or testscript is None) and (project == "" or project is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).\
					order_by(TestScript.id)[limit:offset]
		elif (testscript == "" or testscript is None) and (client == "" or client is None):
			results = session.query(TestScript).join(Project).\
				filter(Project.name == project).\
					order_by(TestScript.id)[limit:offset]
		elif (project == "" or project is None) and (client == "" or client is None):
			results = session.query(TestScript).\
				filter(TestScript.name == testscript).\
					order_by(TestScript.id)[limit:offset]
		elif (workflow == "" or workflow is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).filter(Project.name == project).\
					filter(TestScript.name == testscript).order_by(TestScript.id)[limit:offset]
		elif (testscript == "" or testscript is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).filter(Project.name == project).\
					order_by(TestScript.id)[limit:offset]
		elif (project == "" or project is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).\
					filter(TestScript.name == testscript).order_by(TestScript.id)[limit:offset]
		elif (client == "" or client is None):
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Project.name == project).\
					filter(TestScript.name == testscript).order_by(TestScript.id)[limit:offset]
		#All filters provided
		else:
			results = session.query(TestScript).join(Project).join(Client).\
				filter(Client.name == client).filter(Project.name == project).\
					filter(TestScript.name == testscript).\
						order_by(TestScript.id)[limit:offset]
		return results
	def FindWorkflows(self, workflow, testscript, client, project, limit, offset):
		"""Return one page (slice [limit:offset]) of Workflow rows matching the
		supplied filters, ordered by Workflow.id.

		Blank filters ("" or None) are ignored; every combination of provided
		filters gets its own query branch.  All name matches use substring
		LIKE patterns ('%<value>%'), unlike FindTestScripts which uses exact
		equality.
		"""
		#No filters provided: page through every workflow
		if (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (project == "" or project is None) and (client == "" or client is None):
			results = session.query(Workflow).\
				order_by(Workflow.id)[limit:offset]
		#Only client provided
		elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (project == "" or project is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).order_by(Workflow.id)[limit:offset]
		#Only workflow provided
		elif (testscript == "" or testscript is None) and (project == "" or project is None) and (client == "" or client is None):
			results = session.query(Workflow).filter(Workflow.name.like('%' + str(workflow) + '%')).\
				order_by(Workflow.id)[limit:offset]
		#Only project provided
		elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (client == "" or client is None):
			results = session.query(Workflow).join(TestScript).join(Project).\
				filter(Project.name.like('%' + str(project) + '%')).order_by(Workflow.id)[limit:offset]
		#Only testscript provided
		elif (workflow == "" or workflow is None) and (project == "" or project is None) and (client == "" or client is None):
			results = session.query(Workflow).join(TestScript).\
				filter(TestScript.name.like('%' + str(testscript) + '%')).order_by(Workflow.id)[limit:offset]
		elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).filter(Project.name.like('%' + str(project) + '%')).\
					order_by(Workflow.id)[limit:offset]
		elif (workflow == "" or workflow is None) and (project == "" or project is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).filter(TestScript.name.like('%' + str(testscript) + '%')).\
					order_by(Workflow.id)[limit:offset]
		elif (workflow == "" or workflow is None) and (client == "" or client is None):
			results = session.query(Workflow).join(TestScript).join(Project).\
				filter(TestScript.name.like('%' + str(testscript) + '%')).filter(Project.name.like('%' + str(project) + '%')).\
					order_by(Workflow.id)[limit:offset]
		elif (testscript == "" or testscript is None) and (project == "" or project is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).filter(Workflow.name.like('%' + str(workflow) + '%')).\
					order_by(Workflow.id)[limit:offset]
		elif (testscript == "" or testscript is None) and (client == "" or client is None):
			results = session.query(Workflow).join(TestScript).join(Project).\
				filter(Workflow.name.like('%' + str(workflow) + '%')).filter(Project.name.like('%' + str(project) + '%')).\
					order_by(Workflow.id)[limit:offset]
		elif (project == "" or project is None) and (client == "" or client is None):
			results = session.query(Workflow).join(TestScript).\
				filter(Workflow.name.like('%' + str(workflow) + '%')).filter(TestScript.name.like('%' + str(testscript) + '%')).\
					order_by(Workflow.id)[limit:offset]
		elif (workflow == "" or workflow is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).filter(Project.name.like('%' + str(project) + '%')).\
					filter(TestScript.name.like('%' + str(testscript) + '%')).order_by(Workflow.id)[limit:offset]
		elif (testscript == "" or testscript is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).filter(Project.name.like('%' + str(project) + '%')).\
					filter(Workflow.name.like('%' + str(workflow) + '%')).order_by(Workflow.id)[limit:offset]
		elif (project == "" or project is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).filter(Workflow.name.like('%' + str(workflow) + '%')).\
					filter(TestScript.name.like('%' + str(testscript) + '%')).order_by(Workflow.id)[limit:offset]
		elif (client == "" or client is None):
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Workflow.name.like('%' + str(workflow) + '%')).filter(Project.name.like('%' + str(project) + '%')).\
					filter(TestScript.name.like('%' + str(testscript) + '%')).order_by(Workflow.id)[limit:offset]
		#All filters provided
		else:
			results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
				filter(Client.name.like('%' + str(client) + '%')).filter(Project.name.like('%' + str(project) + '%')).\
					filter(TestScript.name.like('%' + str(testscript) + '%')).filter(Workflow.name.like('%' + str(workflow) + '%')).\
						order_by(Workflow.id)[limit:offset]
		return results
	def GetKeyActionResults(self, module, sysarea, keyaction, cust, limit, offset, current_product):
		"""Return one page (slice [limit:offset]) of KeyAction rows matching
		the supplied filters, ordered by KeyAction.id.

		Every branch restricts results to current_product.  Blank filters
		("" or None) are ignored; name matches use substring LIKE patterns.
		When self.customEnabled is true, cust is coerced to 0/1 and applied as
		an additional KeyAction.custom filter; otherwise cust is ignored.
		"""
		if self.customEnabled == True:
			#Coerce the loosely-typed cust flag (bool, string or int) to 0/1
			if cust == 'False' or cust == False or cust == 0:
				custom = 0
			else:
				custom = 1
			#No name filters: custom + product only
			if (module == "" or module is None) and (sysarea == "" or sysarea is None) and (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.custom == custom).filter(Product.name == current_product).\
						order_by(KeyAction.id)[limit:offset]
			elif (module == "" or module is None) and (sysarea == "" or sysarea is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).filter(Product.name == current_product).\
						filter(KeyAction.custom == custom).order_by(KeyAction.id)[limit:offset]
			elif (module == "" or module is None) and (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(SystemArea.name.like('%' + str(sysarea) + '%')).filter(Product.name == current_product).\
						filter(KeyAction.custom == custom).order_by(KeyAction.id)[limit:offset]
			elif (sysarea == "" or sysarea is None) and (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(Module.name.like('%' + str(module) + '%')).filter(Product.name == current_product).\
						filter(KeyAction.custom == custom).order_by(KeyAction.id)[limit:offset]
			elif (module == "" or module is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).\
						filter(SystemArea.name.like('%' + str(sysarea) + '%')).filter(Product.name == current_product).\
							filter(KeyAction.custom == custom).order_by(KeyAction.id)[limit:offset]
			elif (sysarea == "" or sysarea is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).\
						filter(Module.name.like('%' + str(module) + '%')).filter(Product.name == current_product).\
							filter(KeyAction.custom == custom).order_by(KeyAction.id)[limit:offset]
			elif (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(SystemArea.name.like('%' + str(sysarea) + '%')).\
						filter(Module.name.like('%' + str(module) + '%')).filter(Product.name == current_product).\
							filter(KeyAction.custom == custom).order_by(KeyAction.id)[limit:offset]
			#All name filters provided
			else:
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).filter(Product.name == current_product).\
						filter(SystemArea.name.like('%' + str(sysarea) + '%')).filter(Module.name.like('%' + str(module) + '%')).\
							filter(KeyAction.custom == custom).order_by(KeyAction.id)[limit:offset]
		else:
			#Custom filtering disabled: same branches without the custom filter
			if (module == "" or module is None) and (sysarea == "" or sysarea is None) and (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).\
					join(Product).filter(Product.name == current_product).\
						order_by(KeyAction.id)[limit:offset]
			elif (module == "" or module is None) and (sysarea == "" or sysarea is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).filter(Product.name == current_product).\
						order_by(KeyAction.id)[limit:offset]
			elif (module == "" or module is None) and (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(SystemArea.name.like('%' + str(sysarea) + '%')).filter(Product.name == current_product).\
						order_by(KeyAction.id)[limit:offset]
			elif (sysarea == "" or sysarea is None) and (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(Module.name.like('%' + str(module) + '%')).filter(Product.name == current_product).\
						order_by(KeyAction.id)[limit:offset]
			elif (module == "" or module is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).filter(Product.name == current_product).\
						filter(SystemArea.name.like('%' + str(sysarea) + '%')).order_by(KeyAction.id)[limit:offset]
			elif (sysarea == "" or sysarea is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).filter(Product.name == current_product).\
						filter(Module.name.like('%' + str(module) + '%')).order_by(KeyAction.id)[limit:offset]
			elif (keyaction == "" or keyaction is None):
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(SystemArea.name.like('%' + str(sysarea) + '%')).filter(Product.name == current_product).\
						filter(Module.name.like('%' + str(module) + '%')).order_by(KeyAction.id)[limit:offset]
			else:
				results = session.query(KeyAction).join(SystemArea).join(Module).join(Product).\
					filter(KeyAction.name.like('%' + str(keyaction) + '%')).filter(Product.name == current_product).\
						filter(SystemArea.name.like('%' + str(sysarea) + '%')).filter(Module.name.like('%' + str(module) + '%')).\
							order_by(KeyAction.id)[limit:offset]
		return results
	#TO-DO: Update Query -- this signature (workflow, testscript, project, client)
	#does not match the seven-argument call in ApplyWorkflowFilter above
def GetWorkflowResults(self, workflow, testscript, project, client, limit, offset):
if (testscript == "" or testscript is None) and (project == "" or project is None) and (client == "" or client is None) and (workflow == "" or workflow is None):
results = session.query(WorkflowAction).\
order_by(WorkflowAction.id)[limit:offset]
elif (testscript == "" or testscript is None) and (project == "" or project is None) and (client == "" or client is None):
results = session.query(Workflow).join(WorkflowAction).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (project == "" or project is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like('%' + str(client) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None) and (client == "" or client is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Project.name.like('%' + str(project) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (workflow == "" or workflow is None) and (project == "" or project is None) and (client == "" or client is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (workflow == "" or workflow is None) and (testscript == "" or testscript is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like('%' + str(client) + '%')).\
filter(Project.name.like('%' + str(project) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (testscript == "" or testscript is None) and (client == "" or client is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Project.name.like('%' + str(project) + '%')).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (testscript == "" or testscript is None) and (project == "" or project is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like("'%s'" % (client))).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (workflow == "" or workflow is None) and (project == "" or project is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like('%' + str(client) + '%')).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (project == "" or project is None) and (client == "" or client is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (workflow == "" or workflow is None) and (client == "" or client is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Project.name.like('%' + str(project) + '%')).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (testscript == "" or testscript is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like('%' + str(client) + '%')).\
filter(Project.name.like('%' + str(project) + '%')).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (project == "" or project is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like('%' + str(client) + '%')).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (client == "" or client is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Project.name.like('%' + str(project) + '%')).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
elif (workflow == "" or workflow is None):
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like('%' + str(client) + '%')).\
filter(Project.name.like('%' + str(project) + '%')).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
else:
results = session.query(Workflow).join(WorkflowAction).\
join(TestScript).join(Project).join(Client).\
filter(Client.name.like('%' + str(client) + '%')).\
filter(Project.name.like('%' + str(project) + '%')).\
filter(TestScript.name.like('%' + str(testscript) + '%')).\
filter(Workflow.name.like('%' + str(workflow) + '%')).\
order_by(WorkflowAction.id)[limit:offset]
return results
#------------------------------------------------------------
#----------------DB Writer-----------------------------------
#------------------------------------------------------------
class DatabaseWriter():
	#Class to abstract away saving of key objects to the database
	#Every method works through the module-level SQLAlchemy session, looks
	#rows up by business name (not id), and commits its own changes.
	def __init__(self):
		Logger.info('DBWriter: Database Writer Created')
	def SaveInputParameter(self, ip_name, ka_name):
		"""Create input parameter ip_name on key action ka_name unless one
		with that name already exists on that action."""
		#Check if the input parameter exists
		ip = session.query(InputParameter).join(KeyAction).filter(KeyAction.name==ka_name).filter(InputParameter.name==ip_name).all()
		if len(ip) == 0:
			#Attach the new parameter to the first key action with that name
			keyaction = session.query(KeyAction).filter(KeyAction.name==ka_name).all()
			inputparameter = InputParameter(name=ip_name, keyactionid=keyaction[0].id)
			session.add(inputparameter)
		else:
			inputparameter = ip[0]
		session.commit()
	def SaveWorkflowParameter(self, ip_name, action_name, flow_name, param_value, testscript, project, client):
		"""Create or update one workflow parameter value.

		The parameter is located via its input parameter (ip_name on
		action_name) and its workflow (flow_name under testscript / project /
		client).  A new WorkflowParameter row is created when none exists;
		otherwise the existing row's value is overwritten.
		"""
		ip = session.query(InputParameter).join(KeyAction).\
			filter(KeyAction.name==action_name).filter(InputParameter.name==ip_name).all()
		ka = session.query(KeyAction).filter(KeyAction.name==action_name).all()
		wf = session.query(Workflow).join(TestScript).join(Project).join(Client).\
			filter(Workflow.name==flow_name).filter(TestScript.name==testscript).\
				filter(Project.name==project).filter(Client.name==client).all()
		if len(ka) > 0 and len(wf) > 0:
			wfp = session.query(WorkflowParameter).join(WorkflowAction).join(Workflow).\
				filter(Workflow.id==wf[0].id).filter(WorkflowParameter.inputparamid==ip[0].id).all()
		else:
			wfp = []
		if len(wfp) == 0:
			#NOTE(review): this branch dereferences ka[0]/wf[0] even when the
			#guard above found them empty -- an unknown action or workflow
			#raises IndexError here; confirm intended handling.
			wfa = session.query(WorkflowAction).join(KeyAction).join(Workflow).\
				filter(KeyAction.id==ka[0].id).filter(Workflow.id==wf[0].id).all()
			param = WorkflowParameter(inputparamid=ip[0].id, keyactionid=wfa[0].id, value = param_value)
			session.add(param)
		else:
			wfp[0].value = param_value
		session.commit()
	def SaveKeyAction(self, product, module, sysarea, name, desc, custom, ip_list):
		"""Create or update a key action together with its module, system
		area, custom flag, description and input parameters."""
		#Check if the module exists
		mod = session.query(Module).filter(Module.name==module).all()
		if len(mod) == 0:
			prod = session.query(Product).filter(Product.name==product).all()
			if len(prod) != 0:
				prod_id = prod[0].id
			else:
				#Unknown product: fall back to product id 1
				prod_id = 1
			module = Module(name=module, productid = prod_id)
			session.add(module)
		else:
			module = mod[0]
		#Check if the system area exists
		sa = session.query(SystemArea).filter(SystemArea.name==sysarea).all()
		if len(sa) == 0:
			sysarea = SystemArea(name=sysarea)
			session.add(sysarea)
		else:
			sysarea = sa[0]
		#Check if the key action exists
		ka = session.query(KeyAction).filter(KeyAction.name==name).all()
		if len(ka) == 0:
			keyaction = KeyAction(name=name)
			session.add(keyaction)
		else:
			keyaction = ka[0]
		#Commit first so any newly-created rows receive their primary keys
		session.commit()
		#Assign the keyaction to the system area and module
		keyaction.systemareaid = sysarea.id
		sysarea.moduleid = module.id
		#Assign the description & custom
		keyaction.description = desc
		if custom == True or custom == 'True' or custom == 'true':
			keyaction.custom = True
		else:
			keyaction.custom = False
		#Input Parameters
		#Assumes that ip_list is passed in as a list of text inputs or as a list of strings
		for ip in ip_list:
			if isinstance(ip, basestring):
				self.SaveInputParameter(ip, keyaction.name)
			else:
				self.SaveInputParameter(ip.text, keyaction.name)
		session.commit()
	def SaveWorkflowAction(self, action_name, flow_name, expected_results, ip_value_list, testscript, project, client):
		"""Create or update the workflow action linking action_name into
		flow_name, then save one workflow parameter per value in
		ip_value_list (values map positionally onto the action's input
		parameters)."""
		ka = session.query(KeyAction).filter(KeyAction.name==action_name).all()
		wf = session.query(Workflow).join(TestScript).join(Project).join(Client).\
			filter(Workflow.name==flow_name).filter(TestScript.name==testscript).\
				filter(Project.name==project).filter(Client.name==client).all()
		ips = session.query(InputParameter).join(KeyAction).filter(KeyAction.name == action_name).all()
		i = 0
		#Check if the workflow action exists
		if len(ka) > 0 and len(wf) > 0:
			wfa = session.query(WorkflowAction).join(Workflow).\
				filter(Workflow.id==wf[0].id).filter(WorkflowAction.keyactionid==ka[0].id).all()
		else:
			wfa=[]
		if len(wfa) == 0:
			#NOTE(review): dereferences ka[0]/wf[0] even when the guard above
			#found them empty -- raises IndexError for unknown names.
			action = WorkflowAction(keyactionid=ka[0].id, workflowid=wf[0].id, expectedresult=expected_results)
			session.add(action)
		else:
			wfa[0].expectedresult = expected_results
		for ip_value in ip_value_list:
			self.SaveWorkflowParameter(ips[i].name, action_name, flow_name, ip_value, testscript, project, client)
			i+=1
		session.commit()
	def SaveConnectionsList(self, con_list, workflow, testscript, project, client):
		"""Replace the workflow's next-action links with the connector pairs
		in con_list ([start_connectors, end_connectors], matched by index).
		Connector labels (celement.label.img.text) name the key actions."""
		start = con_list[0]
		end = con_list[1]
		i=0
		#Delete all the existing next actions in the db for the workflow
		n_actions = session.query(WorkflowNextAction).join(WorkflowAction).join(Workflow).join(TestScript).join(Project).join(Client).\
			filter(Workflow.name == workflow).filter(TestScript.name == testscript).\
				filter(Project.name == project).filter(Client.name == client).all()
		for action in n_actions:
			session.delete(action)
		session.commit()
		#Iterate through the lists
		for celement in con_list[0]:
			#A celement of index i from con_list[0] is the start connector
			#A celement of index i from con_list[1] is the end connector
			#Find next action id
			nextaction_id = session.query(WorkflowAction).join(KeyAction).join(Workflow).\
				join(TestScript).join(Project).join(Client).filter(KeyAction.name==end[i].label.img.text).\
					filter(Workflow.name == workflow).filter(TestScript.name == testscript).\
						filter(Project.name == project).filter(Client.name == client).all()
			firstaction_id = session.query(WorkflowAction).join(KeyAction).join(Workflow).\
				join(TestScript).join(Project).join(Client).filter(KeyAction.name==celement.label.img.text).\
					filter(Workflow.name == workflow).filter(TestScript.name == testscript).\
						filter(Project.name == project).filter(Client.name == client).all()
			#Check if the next action exists within the workflow
			nxa = session.query(WorkflowNextAction).join(WorkflowAction).join(Workflow).join(TestScript).join(Project).join(Client).\
				filter(WorkflowNextAction.keyactionid==firstaction_id[0].id).\
					filter(WorkflowNextAction.nextactionid==nextaction_id[0].id).\
						filter(Workflow.name == workflow).filter(TestScript.name == testscript).\
							filter(Project.name == project).filter(Client.name == client).all()
			if len(nxa) == 0:
				#Find the key action and next key action
				ka = firstaction_id[0]
				na = nextaction_id[0]
				#Create a new workflow next action
				next_action = WorkflowNextAction(keyactionid=ka.id, nextactionid=na.id)
				session.add(next_action)
				session.commit()
			else:
				next_action = nxa
			i+=1
		session.commit()
	def SaveKeyActionByID(self, child, id):
		"""Update the key action with primary key id (plus its system area
		and module) from the widgets on child; returns the KeyAction rows
		found.  More than one row for any lookup is logged as a business key
		violation and skipped."""
		#Module
		rows = session.query(Module).join(SystemArea).join(KeyAction).filter(KeyAction.id == id).all()
		if len(rows) > 1:
			Logger.debug('Business Key Violation encountered in Module table')
		elif len(rows) == 1:
			rows[0].name = child.module_in.text
			session.commit()
			Logger.debug('QKA: Module Committed %s' % (child.module_in.text))
		#System Area
		sa_rows = session.query(SystemArea).join(KeyAction).filter(KeyAction.id == id).all()
		if len(sa_rows) > 1:
			Logger.debug('Business Key Violation encountered in System Area table')
		elif len(sa_rows) == 1:
			sa_rows[0].name = child.sa_in.text
			sa_rows[0].moduleid = rows[0].id
			session.commit()
			Logger.debug('QKA: System Area Committed %s' % (child.sa_in.text))
		#Key Action
		ka_rows = session.query(KeyAction).filter(KeyAction.id == id).all()
		if len(ka_rows) > 1:
			Logger.debug('Business Key Violation encountered in Key Action table')
		elif len(ka_rows) == 1:
			Logger.debug('QKA: Single Key Action found')
			ka_rows[0].name = child.ka_in.text
			ka_rows[0].systemareaid = sa_rows[0].id
			ka_rows[0].description = child.desc_in.text
			ka_rows[0].custom = child.custom_in.active
			session.commit()
			Logger.debug('QKA: Key Action Committed %s' % (child.ka_in.text))
		return ka_rows
	def ValidateInputParameter(self, input_list, ip_list, ip_id, orig_ip_list):
		"""Sync the key action's input parameters with the edited text inputs:
		rename existing parameters matched by id, then add any extra inputs
		as new parameters."""
		#Input List gives a list of text inputs
		#IP List gives a list of IP Name Strings to check against
		#ID is the Key ActionID
		#Origin IP List gives a list of the input parameter ID's in the sameorder as the ip list
		#How many existing parameters do we have on the action?
		inputparams = session.query(InputParameter).join(KeyAction).filter(KeyAction.id == ip_id).all()
		#Fill the existing parameters first
		i=0
		for param in inputparams:
			for i in range(0, len(orig_ip_list)):
				if param.id == orig_ip_list[i]:
					param.name = input_list[i].text
					Logger.debug('QKA: Input Parameter Match found on %s' % (param.name))
			#NOTE(review): the inner loop rebinds i, so this increment is
			#overwritten on the next pass -- confirm the intended counter.
			i+=1
		i=0
		#Add any new parameters
		for j in range(len(inputparams), len(input_list)):
			par = InputParameter(name=input_list[j].text, keyactionid=ip_id)
			Logger.debug('QKA: New Input Parameter added %s' % (par.name))
			session.add(par)
		session.commit()
	def SaveInputParameters(self, child, ka_rows, id, orig_ip_list):
		"""Thin wrapper: validate and save the input parameters edited on child."""
		#Input Parameters
		self.ValidateInputParameter(child.iplist, ka_rows, id, orig_ip_list)
	def SaveFlowchart(self, nodes_list, current_script, current_project, current_client, current_workflow):
		"""Replace the workflow's stored flowchart positions with the grid
		positions (cell.row / cell.col) of the widgets in nodes_list."""
		#Delete all the existing flowchart nodes
		fl_nodes = session.query(FlowchartPosition).join(WorkflowAction).join(Workflow).join(TestScript).join(Project).join(Client).\
			filter(Workflow.name==current_workflow).filter(TestScript.name == current_script).\
				filter(Project.name==current_project).filter(Client.name==current_client).all()
		for node in fl_nodes:
			session.delete(node)
		session.commit()
		#Add the current nodes into the db
		for node in nodes_list:
			wfa = session.query(WorkflowAction).join(KeyAction).join(Workflow).\
				join(TestScript).join(Project).join(Client).filter(KeyAction.name==node.label.img.text).\
					filter(TestScript.name == current_script).filter(Project.name==current_project).\
						filter(Client.name==current_client).filter(Workflow.name==current_workflow).all()
			fl = session.query(FlowchartPosition).filter(FlowchartPosition.keyactionid == wfa[0].id).all()
			if len(fl) == 0:
				flow = FlowchartPosition(keyactionid=wfa[0].id, row=node.cell.row, col=node.cell.col)
				session.add(flow)
			else:
				flow = fl[0]
			flow.row = node.cell.row
			flow.col = node.cell.col
		session.commit()
#------------------------------------------------------------
#----------------DB Writer-----------------------------------
#------------------------------------------------------------
#Internal DB Writer catches the data stream and writes results to database (Import/Export)
class DBWriter():
def write(self, stream):
while stream.result_stream.empty() == False:
#Retrieve the top value from the queue
data_buffer = stream.result_stream.get()
#Write the data to the DB
if data_buffer.type == 0:
#The buffer data type is not assigned, perform no operations
Logger.debug('Writer: Buffer Data Type not assigned')
elif data_buffer.type == 1:
Logger.debug('Writer: Product Export Initialized')
#Create an Import Product object
imp = ProductImport()
imp.importid = data_buffer.data[0]
#Does the product already exist in the DB?
result = session.query(Product).filter(Product.name == '%s' % (data_buffer.data[1])).all()
if result is not None and len(result) != 0:
imp.productid = result[0].id
else:
#Make a new product
prod = Product()
prod.name = data_buffer.data[1]
session.add(prod)
session.commit()
imp.productid = prod.id
session.add(imp)
session.commit()
elif data_buffer.type == 2:
Logger.debug('Writer: Module Export Initialized')
#Create an Import Module object
imp = ModuleImport()
imp.importid = data_buffer.data[0]
#Does the module already exist in the DB?
result = session.query(Module).join(Product).join(ProductImport).\
filter(Module.name == '%s' % (data_buffer.data[2])).\
filter(ProductImport.importid == data_buffer.data[1]).all()
if result is not None and len(result) != 0:
imp.moduleid = result[0].id
else:
#Find the product for the new module
prod = session.query(Product).join(ProductImport).\
filter(ProductImport.importid == data_buffer.data[1]).all()
if prod is not None and len(prod) != 0:
#Make a new module
mod = Module()
mod.name = data_buffer.data[2]
mod.productid = prod[0].id
session.add(mod)
session.commit()
imp.moduleid = mod.id
else:
#If the product import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Product not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 3:
Logger.debug('Writer: System Area Export Initialized')
#Create an Import System Area object
imp = SystemAreaImport()
imp.importid = data_buffer.data[0]
#Does the system area already exist in the DB?
result = session.query(SystemArea).join(Module).join(ModuleImport).\
filter(SystemArea.name == '%s' % (data_buffer.data[2])).\
filter(ModuleImport.importid == data_buffer.data[1]).all()
if result is not None and len(result) != 0:
imp.systemareaid = result[0].id
else:
#Find the module for the new system area
mod = session.query(Module).join(ModuleImport).\
filter(ModuleImport.importid == data_buffer.data[1]).all()
if mod is not None and len(mod) != 0:
#Make a new system area
sa = SystemArea()
sa.name = data_buffer.data[2]
sa.moduleid = mod[0].id
session.add(sa)
session.commit()
imp.systemareaid = sa.id
else:
#If the module import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Module not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 4:
Logger.debug('Writer: Key Action Export Initialized')
#Create an Import Key Action object
imp = KeyActionImport()
imp.importid = data_buffer.data[0]
#Does the key action already exist in the DB?
result = session.query(KeyAction).join(SystemArea).join(SystemAreaImport).\
filter(KeyAction.name == '%s' % (data_buffer.data[2])).\
filter(SystemAreaImport.importid == data_buffer.data[1]).all()
if result is not None and len(result) != 0:
imp.keyactionid = result[0].id
result[0].name = data_buffer.data[2]
result[0].description = data_buffer.data[3]
if data_buffer.data[4] == 0 or data_buffer.data[4] == '0'\
or data_buffer.data[4] == False or data_buffer.data[4] == 'False'\
or data_buffer.data[4] is None or data_buffer.data[4] == '':
result[0].custom = False
else:
result[0].custom = True
else:
#Find the system area for the new key action
sa = session.query(SystemArea).join(SystemAreaImport).\
filter(SystemAreaImport.importid == data_buffer.data[1]).all()
if sa is not None and len(sa) != 0:
#Make a new key action
ka = KeyAction()
ka.name = data_buffer.data[2]
ka.systemareaid = sa[0].id
ka.name = data_buffer.data[2]
ka.description = data_buffer.data[3]
if data_buffer.data[4] == 0 or data_buffer.data[4] == '0'\
or data_buffer.data[4] == False or data_buffer.data[4] == 'False'\
or data_buffer.data[4] is None or data_buffer.data[4] == '':
ka.custom = False
else:
ka.custom = True
session.add(ka)
session.commit()
imp.keyactionid = ka.id
else:
#If the system area import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import System Area not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 5:
Logger.debug('Writer: Input Parameter Export Initialized')
#Create an Import Key Action object
imp = InputParameterImport()
imp.importid = data_buffer.data[0]
#Does the input parameter already exist in the DB?
result = session.query(InputParameter).join(KeyAction).join(KeyActionImport).\
filter(InputParameter.name == '%s' % (data_buffer.data[2])).\
filter(KeyActionImport.importid == data_buffer.data[1]).all()
if result is not None and len(result) != 0:
imp.inputparameterid = result[0].id
else:
#Find the key action for the new input parameter
ka = session.query(KeyAction).join(KeyActionImport).\
filter(KeyActionImport.importid == data_buffer.data[1]).all()
if ka is not None and len(ka) != 0:
#Make a new input paramter
ip = InputParameter()
ip.name = data_buffer.data[2]
ip.keyactionid = ka[0].id
session.add(ip)
session.commit()
imp.keyactionid = ip.id
else:
#If the key action import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Key Action not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 6:
Logger.debug('Writer: Client Export Initialized')
#Create an Import Client object
imp = ClientImport()
imp.importid = data_buffer.data[0]
#Does the client already exist in the DB?
result = session.query(Client).filter(Client.name == '%s' % (data_buffer.data[1])).all()
if result is not None and len(result) != 0:
imp.clientid = result[0].id
else:
#Make a new client
client = Client()
client.name = data_buffer.data[1]
session.add(client)
session.commit()
imp.clientid = client.id
session.add(imp)
session.commit()
elif data_buffer.type == 7:
Logger.debug('Writer: Project Export Initialized')
#Create an Import Project object
imp = ProjectImport()
imp.importid = data_buffer.data[0]
#Does the system area already exist in the DB?
result = session.query(Project).join(Client).join(ClientImport).\
filter(Project.name == '%s' % (data_buffer.data[2])).\
filter(ClientImport.importid == data_buffer.data[1]).all()
if result is not None and len(result) != 0:
imp.projectid = result[0].id
else:
#Find the client for the new project
cl = session.query(Client).join(ClientImport).\
filter(ClientImport.importid == data_buffer.data[1]).all()
if cl is not None and len(cl) != 0:
#Make a new project
pr = Project()
pr.name = data_buffer.data[2]
pr.clientid = cl[0].id
session.add(pr)
session.commit()
imp.projectid = pr.id
else:
#If the client import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Client not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 8:
Logger.debug('Writer: Test Script Export Initialized')
#Create an Import Test Script object
imp = TestScriptImport()
imp.importid = data_buffer.data[0]
#Does the test script already exist in the DB?
result = session.query(TestScript).join(Project).join(ProjectImport).\
filter(TestScript.name == '%s' % (data_buffer.data[2])).\
filter(ProjectImport.importid == data_buffer.data[1]).all()
if result is not None and len(result) != 0:
imp.testscriptid = result[0].id
else:
#Find the project for the new test script
pr = session.query(Project).join(ProjectImport).\
filter(ProjectImport.importid == data_buffer.data[1]).all()
if pr is not None and len(pr) != 0:
#Make a new test script
ts = TestScript()
ts.name = data_buffer.data[2]
ts.projectid = pr[0].id
session.add(ts)
session.commit()
imp.testscriptid = ts.id
else:
#If the project import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Project not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 9:
Logger.debug('Writer: Workflow Export Initialized')
#Create an Import Workflow object
imp = WorkflowImport()
imp.importid = data_buffer.data[0]
#Does the workflow already exist in the DB?
result = session.query(Workflow).join(TestScript).join(TestScriptImport).\
filter(Workflow.name == '%s' % (data_buffer.data[2])).\
filter(TestScriptImport.importid == data_buffer.data[1]).all()
if result is not None and len(result) != 0:
imp.workflowid = result[0].id
#Remove the workflow actions from the workflow and replace them
wfas = session.query(WorkflowAction).join(Workflow).filter(Workflow.id == result[0].id)
for wfa in wfas:
#Clear the next actions, flowchart positions, and workflow parameters from the workflow action
#This allows for a full replace when doing dataloaders of these
#lower level objects while updating on matches with higher level
#objects
na = session.query(WorkflowNextAction).join(WorkflowAction).filter(WorkflowAction.id == wfa.id)
fc = session.query(FlowchartPosition).join(WorkflowAction).filter(WorkflowAction.id == wfa.id)
wp = session.query(WorkflowParameter).join(WorkflowAction).filter(WorkflowAction.id == wfa.id)
for n in na:
session.delete(n)
for f in fc:
session.delete(f)
for w in wp:
session.delete(w)
session.delete(wfa)
session.commit()
else:
#Find the test script for the new workflow
ts = session.query(TestScript).join(TestScriptImport).\
filter(TestScriptImport.importid == data_buffer.data[1]).all()
if ts is not None and len(ts) != 0:
#Make a new workflow
wf = Workflow()
wf.name = data_buffer.data[2]
wf.testscriptid = ts[0].id
session.add(wf)
session.commit()
imp.workflowid = wf.id
else:
#If the Test Script import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Test Script not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 10:
Logger.debug('Writer: Workflow Action Export Initialized')
#Create an Import Workflow Action object
imp = WorkflowActionImport()
imp.importid = data_buffer.data[0]
#Does the workflow action already exist in the DB? No
#Find the workfow for the new workflow action
wf = session.query(Workflow).join(WorkflowImport).\
filter(WorkflowImport.importid == data_buffer.data[2]).all()
#Find the key action for the new workflow action
ka = session.query(KeyAction).join(KeyActionImport).\
filter(KeyActionImport.importid == data_buffer.data[1]).all()
if wf is not None and len(wf) != 0:
if ka is not None and len(ka) != 0:
#Make a new workflow action
wfa = WorkflowAction()
wfa.keyactionid = ka[0].id
wfa.workflowid = wf[0].id
wfa.expectedresult = data_buffer.data[3]
wfa.notes = data_buffer.data[4]
wfa.fail = data_buffer.data[5]
session.add(wfa)
session.commit()
imp.workflowactionid = wfa.id
else:
#If the key action import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Key Action not found in DB')
stream.error_stream.put(data_buffer)
return True
else:
#If the Workflow import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Workflow not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 11:
Logger.debug('Writer: Workflow Next Action Export Initialized')
#Create an Import WorkflowNext Action object
imp = WorkflowNextActionImport()
imp.importid = data_buffer.data[0]
#Does the workflow next action already exist in the DB?
#We can assume no, and clear the workflow next actions & replce them
#Find the workfow action for the new workflow next action
wfa = session.query(WorkflowAction).join(WorkflowActionImport).\
filter(WorkflowActionImport.importid == data_buffer.data[1]).all()
#Find the next workflow id for the new workflow next action
wfa2 = session.query(WorkflowAction).join(WorkflowActionImport).\
filter(WorkflowActionImport.importid == data_buffer.data[2]).all()
if wfa is not None and len(wfa) != 0:
if wfa2 is not None and len(wfa2) != 0:
#Make a new workflow next action
wfna = WorkflowNextAction()
wfna.keyactionid = wfa[0].id
wfna.nextactionid = wfa2[0].id
session.add(wfna)
session.commit()
imp.workflownextactionid = wfna.id
else:
#If the first workflow action import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('First Import Workflow Action not found in DB')
stream.error_stream.put(data_buffer)
return True
else:
#If the second workflow action import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Second Import Workflow Action not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
elif data_buffer.type == 12:
Logger.debug('Writer: Workflow Parameter Export Initialized')
#Create an Import Workflow Parameter object
imp = WorkflowParameterImport()
imp.importid = data_buffer.data[0]
ips = session.query(InputParameter).join(InputParameterImport).\
filter(InputParameterImport.importid == data_buffer.data[2]).all()
if len(ips) != 0 and ips is not None:
ip = ips[0]
else:
#If the workflow parameter import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Workflow Parameter not found in DB')
stream.error_stream.put(data_buffer)
return True
#Does the workflow parameter already exist in the DB?
#We can assume no, and clear the workflow next actions & replce them
#Find the workfow action for the new workflow parameter
wfas = session.query(WorkflowAction).join(WorkflowActionImport).\
filter(WorkflowActionImport.importid == data_buffer.data[1]).all()
if wfas is not None and len(wfa) != 0:
wfa = wfas[0]
else:
#If the product import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Product not found in DB')
stream.error_stream.put(data_buffer)
return True
#Make a new workflow parameter
wfp = WorkflowNextAction()
wfp.keyactionid = wfa.id
wfp.inputparameterid = ip.id
wfp.value = data_buffer.data[3]
session.add(wfp)
session.commit()
imp.workflowparameterid = wfp.id
session.add(imp)
session.commit()
elif data_buffer.type == 13:
Logger.debug('Writer: Flowchart Export Initialized')
#Create an Import Flowchart object
imp = FlowchartPositionImport()
imp.importid = data_buffer.data[0]
#Does the workflow parameter already exist in the DB?
#We can assume no, and clear the workflow next actions & replce them
#Find the workfow action for the new flowchart position
wfa = session.query(WorkflowAction).join(WorkflowActionImport).\
filter(WorkflowActionImport.importid == data_buffer.data[1]).all()
if wfa is not None and len(wfa) != 0:
#Make a new flowchart position
fp = FlowchartPosition()
fp.keyactionid = wfa[0].id
fp.row = data_buffer.data[2]
fp.col = data_buffer.data[3]
session.add(fp)
session.commit()
imp.flowchartpositionid = fp.id
else:
#If the workflow action import can't be found, then the buffer should be
#added to the error queue and the method exited
data_buffer.add_error('Import Workflow Action not found in DB')
stream.error_stream.put(data_buffer)
return True
session.add(imp)
session.commit()
#Finish with the data
data_buffer.next_status()
stream.result_stream.task_done()
#------------------------------------------------------------
#------------------------------------------------------------
#----------------Main App------------------------------------
#------------------------------------------------------------
#------------------------------------------------------------
#Load the .kv layout files for every popup/screen used by the app
Builder.load_file('kv/TestScriptOptionsPopup.kv')
Builder.load_file('kv/AddToWorkflowPopup.kv')
Builder.load_file('kv/CreateWorkflowPopup.kv')
Builder.load_file('kv/KeyActionGroupScreen.kv')
Builder.load_file('kv/KeyActionsTabbedPanel.kv')
Builder.load_file('kv/LoadWorkflowPopup.kv')
Builder.load_file('kv/LoadSubflowPopup.kv')
Builder.load_file('kv/SelectableButton.kv')
Builder.load_file('kv/WorkflowScreen.kv')
Builder.load_file('kv/ForInPopup.kv')
Builder.load_file('kv/DeletePopup.kv')
Builder.load_file('kv/DBAdministrator.kv')
Builder.load_file('kv/FileChooserPopup.kv')
Builder.load_file('kv/ExportParametersPopup.kv')
Logger.info('KV: KV Files Loaded')
#Global Variable Declarations
#Create the DB Writer (NOTE: RunMigration later shadows this with a local DBWriter)
writer = DatabaseWriter()
#Create the filter manager
#NOTE(review): this shadows the builtin filter(); renaming would break callers elsewhere
filter = FilterManager()
#Create the export template reader (engine_name is defined earlier in this file)
tr = TemplateReader(engine_name)
#Create the Selection List (button texts currently selected in the key action grid)
selected = []
#Create the list of selected key action id's (parallel to `selected`)
selected_ids = []
#Create the list of id's in the key action carousel
carousel_ids = []
class KeyActionGroupScreen(Screen):
    """Main key-action browsing screen; holds popup handles and the active product."""
    pop_up=ObjectProperty(None)
    #Saved handle so a temporary popup (e.g. file chooser) can restore the previous one
    original_pop_up = ObjectProperty(None)
    current_product = ObjectProperty(None)
class WorkflowScreen(Screen):
    """Workflow editor screen with its layout containers and current editing context."""
    pop_up=ObjectProperty(None)
    drag_grid=ObjectProperty(None)
    grid_layout=ObjectProperty(None)
    float_layout=ObjectProperty(None)
    current_wf=ObjectProperty(None)
    #Names identifying the workflow currently loaded in the editor
    current_workflowname=StringProperty(None)
    current_script=StringProperty(None)
    current_project=StringProperty(None)
    current_client=StringProperty(None)
class SelectableGrid(GridLayout):
    """Grid layout whose behavior is defined entirely in the .kv files."""
    pass
class ProductPanel(BoxLayout):
    """Panel for selecting or entering the current product."""
    product_spinner = ObjectProperty(None)
    product_text = ObjectProperty(None)
class ConnectionsPanel(BoxLayout):
    """Panel wrapping the DB administration widget."""
    db = ObjectProperty(None)
class FileChooserPopup(BoxLayout):
    """File chooser popup content used to pick a migration source file."""
    text_input = ObjectProperty(None)
    file_chooser = ObjectProperty(None)
    app = ObjectProperty(None)
class DestinationFileChooserPopup(BoxLayout):
    """File chooser popup content used to pick a migration destination file."""
    text_input = ObjectProperty(None)
    file_chooser = ObjectProperty(None)
    app = ObjectProperty(None)
class DatabaseWidget(BoxLayout):
    """DB admin form: migration direction, translator, data type, and file paths."""
    direction_spinner = ObjectProperty(None)
    translator_spinner = ObjectProperty(None)
    type_spinner = ObjectProperty(None)
    source_input = ObjectProperty(None)
    destination_input = ObjectProperty(None)
    finddestpopup_button = ObjectProperty(None)
class KeyActionAdvancedOptionsPopup(BoxLayout):
    """Advanced options popup content; layout defined in .kv."""
    pass
class KeyActionTabbedPanel(TabbedPanel):
    """Tabbed panel combining the product panel and the DB connections panel."""
    ka_prodpanel = ObjectProperty(None)
    conn_panel = ObjectProperty(None)
class ForInPopup(BoxLayout):
    """Popup content for configuring a for-in loop over workflow actions."""
    app=ObjectProperty(None)
    keyaction_spinner=ObjectProperty(None)
    inputparameter_spinner=ObjectProperty(None)
    in_textinput=ObjectProperty(None)
    endaction_spinner=ObjectProperty(None)
class TestScriptOptionsPopup(BoxLayout):
    """Popup content for choosing/creating the current client, project and test script."""
    current_client = ObjectProperty(None)
    load_client = ObjectProperty(None)
    new_client = ObjectProperty(None)
    current_project = ObjectProperty(None)
    load_project = ObjectProperty(None)
    new_project = ObjectProperty(None)
    current_testscript = ObjectProperty(None)
    load_testscript = ObjectProperty(None)
    new_testscript = ObjectProperty(None)
    app = ObjectProperty(None)
class ExportPopup(BoxLayout):
    """Export popup content; layout defined in .kv."""
    pass
class ExportParametersPopup(GridLayout):
    """Popup content holding one TextInput per export-template parameter."""
    input_grid = ObjectProperty(None)
class AddToWorkflowPopup(BoxLayout):
    """Popup content for adding selected key actions to a workflow."""
    spinner=ObjectProperty(None)
    atwp_workflow=ObjectProperty(None)
    atwp_testscript=ObjectProperty(None)
    atwp_client=ObjectProperty(None)
    atwp_project=ObjectProperty(None)
class LoadWorkflowPopup(BoxLayout):
    """Popup content for picking an existing workflow to load."""
    spinner=ObjectProperty(None)
    lwp_workflow=ObjectProperty(None)
    lwp_testscript=ObjectProperty(None)
    lwp_client=ObjectProperty(None)
    lwp_project=ObjectProperty(None)
class LoadSubflowPopup(BoxLayout):
    """Popup content for copying an existing workflow into a new subflow."""
    #Name for the new subflow being created
    new_name=ObjectProperty(None)
    spinner=ObjectProperty(None)
    lwp_workflow=ObjectProperty(None)
    lwp_testscript=ObjectProperty(None)
    lwp_client=ObjectProperty(None)
    lwp_project=ObjectProperty(None)
class CreateWorkflowPopup(BoxLayout):
    """Popup content for creating a brand-new workflow."""
    new_flow=ObjectProperty(None)
    spinner=ObjectProperty(None)
    cwp_workflow=ObjectProperty(None)
    cwp_testscript=ObjectProperty(None)
    cwp_client=ObjectProperty(None)
    cwp_project=ObjectProperty(None)
class DeletePopup(BoxLayout):
    """Confirmation popup content for delete operations."""
    label=ObjectProperty(None)
class SelectableButton(ToggleButton):
    """Toggle button that mirrors its selected state into the module-level
    ``selected`` (button texts) and ``selected_ids`` (object ids) lists."""
    #Exposes on_selection event
    selection = BooleanProperty(False)
    #Internal, for the Grid Layout to control the selection
    object_id = NumericProperty(0)
    #Assumes button starts with selection = False
    def SelectButton(self, *args):
        """Toggle this button and keep the global selection lists in sync."""
        if not self.selection:
            self.selection = True
            selected.append(self.text)
            selected_ids.append(self.object_id)
        else:
            self.selection = False
            #NOTE(review): list.remove drops the FIRST matching entry — if two
            #buttons share the same text this may deselect the wrong one; verify.
            selected.remove(self.text)
            selected_ids.remove(self.object_id)
#------------------------------------------------------------
#----------------Central App Class---------------------------
#------------------------------------------------------------
#Create the Screenmanager and add the Screens
#The screen names here ('keyactiongroup', 'workflow') are referenced throughout
#the App callbacks via self.root.get_screen(...)
sm = ScreenManager()
sm.add_widget(KeyActionGroupScreen(name='keyactiongroup'))
sm.add_widget(WorkflowScreen(name='workflow'))
Logger.info('Kivy: Screens added to Screen Manager')
#App Definition
class TestScriptBuilderApp(App):
#Initialization function
#Set the first screen and return the screen manager
    def build(self):
        """Kivy entry point: select the starting screen and return the root widget.

        Schedules AdvancedOptionsPopup_KAG for the next frame.
        NOTE(review): the comment below mentions running a default filter, but
        the scheduled callback is the advanced-options popup — confirm intent.
        """
        Logger.debug('Kivy: Set current Screen and return the ScreenManager')
        #Set the current page to key action and run a default filter
        sm.current = 'keyactiongroup'
        Clock.schedule_once(self.AdvancedOptionsPopup_KAG)
        return sm
#----------------------------------------------------------
#------------------DB Admin Callbacks----------------------
#----------------------------------------------------------
def FirstFilter(self, *args):
#self.root.get_screen('keyactiongroup').current_product = 'Default'
self.root.get_screen('workflow').current_client = 'Default'
self.root.get_screen('workflow').current_project = 'Default'
self.root.get_screen('workflow').current_script = 'Default'
filter.FirstPage()
prod_rows = session.query(Product).filter(Product.name == self.root.get_screen('keyactiongroup').current_product).all()
if len(prod_rows) == 0:
prod = Product(name=self.root.get_screen('keyactiongroup').current_product)
session.add(prod)
session.commit()
def FindSourcePopup(self, *args):
Logger.debug('Find Source Popup')
self.root.get_screen('keyactiongroup').original_pop_up = self.root.get_screen('keyactiongroup').pop_up
popup = Popup(title='Source', content=FileChooserPopup(app=self), size_hint=(0.5, 0.75))
self.root.get_screen('keyactiongroup').pop_up.dismiss()
self.root.get_screen('keyactiongroup').pop_up = popup
popup.open()
def FillInput(self, *args):
Logger.debug('Fill Source Popup')
selected_file = self.root.get_screen('keyactiongroup').pop_up.content.file_chooser.selection[0]
self.root.get_screen('keyactiongroup').pop_up.dismiss()
self.root.get_screen('keyactiongroup').pop_up = self.root.get_screen('keyactiongroup').original_pop_up
self.root.get_screen('keyactiongroup').pop_up.open()
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.source_input.text = selected_file
def FillDestinationInput(self, *args):
Logger.debug('Fill Destination Popup')
selected_file = self.root.pop_up.content.file_chooser.selection[0]
self.root.get_screen('keyactiongroup').pop_up.dismiss()
self.root.get_screen('keyactiongroup').pop_up = self.root.get_screen('keyactiongroup').original_pop_up
self.root.get_screen('keyactiongroup').pop_up.open()
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.destination_input.text = selected_file
def FindDestinationPopup(self, *args):
Logger.debug('Find Destination Popup')
self.root.get_screen('keyactiongroup').original_pop_up = self.root.get_screen('keyactiongroup').pop_up
popup = Popup(title='Destination', content=DestinationFileChooserPopup(app=self), size_hint=(0.5, 0.75))
self.root.get_screen('keyactiongroup').pop_up.dismiss()
self.root.get_screen('keyactiongroup').pop_up = popup
popup.open()
def ShowExportParametersPopup(self, *args):
Logger.debug('Show Export Parameters Popup')
#Create the popup
self.root.get_screen('keyactiongroup').original_pop_up = self.root.get_screen('keyactiongroup').pop_up
popup = Popup(title='Export Parameters', content=ExportParametersPopup(), size_hint=(0.3, 0.5))
self.root.get_screen('keyactiongroup').pop_up.dismiss()
self.root.get_screen('keyactiongroup').pop_up = popup
#Generate the parameter list
xml_path = os.path.abspath('../Configuration/ExportTemplates/%s' % (self.root.get_screen('keyactiongroup').original_pop_up.content.conn_panel.db.type_spinner.text))
params = tr.generate_parameter_list(xml_path)
#Add the parameter list as text inputs to the popup
i=0
j=len(params)
for i in range(0, j):
inp = TextInput(hint_text = '%s' % (params[i]), multiline = False)
popup.content.input_grid.add_widget(inp)
#Show the popup
popup.open()
def ExecuteExport(self, *args):
params = []
xml_path = os.path.abspath('../Configuration/ExportTemplates/%s' % (self.root.get_screen('keyactiongroup').original_pop_up.content.conn_panel.db.type_spinner.text))
popup=self.root.get_screen('keyactiongroup').pop_up
#Fix for reversed input parameters
for inp in popup.content.input_grid.children:
params.insert(0, inp.text)
tr.translate_template(xml_path, params)
    def RunMigration(self, *args):
        """Kick off a data migration based on the DB admin panel settings.

        Export: delegates to ShowExportParametersPopup.
        Import: builds a translator -> validation stream -> DB writer pipeline,
        pumps it until the translator reports completion, then clears the
        import staging tables so the next run starts fresh.
        """
        Logger.debug('Run Migration')
        #Determine if we are using the import pipeline or export pipeline
        if self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.direction_spinner.text == 'Export':
            self.ShowExportParametersPopup(args)
        elif self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.direction_spinner.text == 'Import':
            #Create Data Stream
            stream = DataStream()
            #Create Translators & Writers
            #Read the data type; 0=Key Action, 1=Workflow, 2=Test Script, 3=unresolved fallback
            if self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text == 'Key Action':
                import_type = 0
            elif self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text == 'Workflow':
                import_type = 1
            elif self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text == 'Test Script':
                import_type = 2
            else:
                import_type = 3
                Logger.debug('Import Type Unresolved')
            #NOTE(review): shadows the module-level ``writer`` (a DatabaseWriter);
            #confirm DBWriter is the intended class here
            writer = DBWriter()
            #Find the importer matching the selected translator
            if self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text == 'CSV':
                importer = CSVTranslator(self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.source_input.text, import_type, stream.buffer_stream, 10)
            elif self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text == 'Excel':
                importer = ExcelTranslator(self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.source_input.text, import_type, stream.buffer_stream, 10)
            elif self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text == 'DB':
                importer = ExternalDBTranslator(self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.source_input.text, import_type, stream.buffer_stream, 10)
            else:
                Logger.debug('Nothing Selected')
                return True
            #Nothing selected
            log_writer = TerminalWriter()
            #Pump the pipeline one stage at a time until translation completes
            while importer.translation_finished == False:
                #Single Iteration
                #Run Translations
                importer.translate()
                #Run Validations
                stream.stream()
                #Run Writer
                writer.write(stream)
                #Run Error Writer
                log_writer.write(stream)
        #DB Cleanup: purge the import staging tables used to map external ids to DB ids
        if self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.direction_spinner.text == 'Import':
            #Delete everything from the key action import tables
            imp = session.query(InputParameterImport).all()
            for i in imp:
                session.delete(i)
            ka = session.query(KeyActionImport).all()
            for i in ka:
                session.delete(i)
            sa = session.query(SystemAreaImport).all()
            for i in sa:
                session.delete(i)
            mod = session.query(ModuleImport).all()
            for i in mod:
                session.delete(i)
            prod = session.query(ProductImport).all()
            for i in prod:
                session.delete(i)
            #Workflow-level staging tables only exist for non key-action imports
            if import_type != 0:
                #Delete everything from the workflow import tables
                cl = session.query(ClientImport).all()
                for c in cl:
                    session.delete(c)
                pr = session.query(ProjectImport).all()
                for p in pr:
                    session.delete(p)
                ts = session.query(TestScriptImport).all()
                for t in ts:
                    session.delete(t)
                wf = session.query(WorkflowImport).all()
                for w in wf:
                    session.delete(w)
                wfa = session.query(WorkflowActionImport).all()
                for a in wfa:
                    session.delete(a)
                wfna = session.query(WorkflowNextActionImport).all()
                for na in wfna:
                    session.delete(na)
                wfp = session.query(WorkflowParameterImport).all()
                for p in wfp:
                    session.delete(p)
                fl = session.query(FlowchartPositionImport).all()
                for l in fl:
                    session.delete(l)
            session.commit()
def UpdateDirection(self, *args):
Logger.debug('Update Direction')
if self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.direction_spinner.text == 'Import':
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.destination_input.text = 'test.db'
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.source_input.text = ''
del self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.values[:]
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.values.append('CSV')
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.values.append('Excel')
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.values.append('DB')
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text = ''
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text = ''
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.finddestpopup_button.disabled = False
else:
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.source_input.text = 'test.db'
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.destination_input.text = ''
del self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.values[:]
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.values.append('Excel')
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text = 'Excel'
#Populate the values in the data type spinner values with the xml templates in the
#src.export_templates folder
del self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values[:]
for file in tr.select_files_in_folder(os.path.abspath("../Configuration/ExportTemplates"), 'xml'):
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values.append(os.path.basename(file))
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text = ''
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.finddestpopup_button.disabled = True
def UpdateTranslator(self, *args):
Logger.debug('Update Translator')
if self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.direction_spinner.text == 'Import':
if self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text =='CSV':
del self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values[:]
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values.append('Key Action')
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text = 'Key Action'
elif self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text == 'Excel':
del self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values[:]
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values.append('Key Action')
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text = 'Key Action'
elif self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.translator_spinner.text == 'DB':
del self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values[:]
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.values.append('Workflow')
self.root.get_screen('keyactiongroup').pop_up.content.conn_panel.db.type_spinner.text = 'Workflow'
#----------------------------------------------------------
#------------------WF Callbacks----------------------------
#----------------------------------------------------------
def add_ip_to_popup(self, *args):
Logger.debug('WF: Add IP to Popup')
ip = TextInput(hint_text='Input Parameter')
self.root.get_screen('workflow').pop_up.content.ipgrid_in.add_widget(ip)
self.root.get_screen('workflow').pop_up.content.ips.append(ip)
    def ClearWorkflow(self):
        """Empty the workflow editor: clears the drag grid cells and removes
        all widgets from both layout containers.

        NOTE(review): the commented-out lines below used to also reset the
        workflow context strings — presumably disabled on purpose; confirm.
        """
        #Clear the current workflow information and input box
        # self.root.get_screen('workflow').current_client = 'Default'
        # self.root.get_screen('workflow').current_project = 'Default'
        # self.root.get_screen('workflow').current_script = 'Default'
        # self.root.get_screen('workflow').current_workflowname = 'Default'
        # self.root.get_screen('workflow').current_wf.text = 'Default'
        #Clear the Drag Grid, Draggable List
        self.root.get_screen('workflow').drag_grid.clear_cells()
        self.root.get_screen('workflow').grid_layout.clear_widgets()
        self.root.get_screen('workflow').float_layout.clear_widgets()
    def CreateNewSubflow(self, *args):
        """Open the Load Subflow popup and populate its spinners with all
        clients/projects plus the most recent workflows for the current
        test-script context (windowed by popup_filter_limit).
        """
        Logger.debug('WF: Create New Subflow')
        popup = Popup(title='Load Workflow', content=LoadSubflowPopup(), size_hint=(0.4, 0.5))
        popup.open()
        #NOTE(review): the handle is stored on the 'keyactiongroup' screen even for
        #this workflow operation; LoadSubflow reads it back from the same place
        self.root.get_screen('keyactiongroup').pop_up = popup
        #Populate all clients
        clients = session.query(Client).all()
        for client in clients:
            popup.content.lwp_client.values.append(client.name)
        #Populate all projects
        projects = session.query(Project).all()
        for project in projects:
            popup.content.lwp_project.values.append(project.name)
        #Populate the latest 5 workflows into the spinner from the current testscript
        num_flows = session.query(Workflow).join(TestScript).join(Project).join(Client).\
            filter(TestScript.name==self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
            filter(Client.name==self.root.get_screen('workflow').current_client).count()
        #Compute the slice start so only the last popup_filter_limit workflows (by id) are shown
        if num_flows - popup_filter_limit < 0:
            num_flows = 0
        else:
            num_flows = num_flows - popup_filter_limit
        results = session.query(Workflow).join(TestScript).join(Project).join(Client).\
            filter(TestScript.name==self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
            filter(Client.name==self.root.get_screen('workflow').current_client).order_by(Workflow.id)[num_flows:num_flows+popup_filter_limit]
        #Populate values in spinner
        for result in results:
            popup.content.spinner.values.append(result.name)
    def LoadSubflow(self, *args):
        """Copy the selected workflow into a (possibly new) workflow and load it.

        Reads the source workflow and the new name from the load-subflow
        popup, creates the target Workflow row if it does not already exist,
        copies each workflow action (with its parameter values) into it, and
        finally loads the copied key actions into the draggable list.
        """
        Logger.debug('Load Subflow')
        popup = self.root.get_screen('keyactiongroup').pop_up
        current_workflow=popup.content.spinner.text
        # The popup's selections become the current editing context.
        self.root.get_screen('workflow').current_script = popup.content.lwp_testscript.text
        self.root.get_screen('workflow').current_project = popup.content.lwp_project.text
        self.root.get_screen('workflow').current_client = popup.content.lwp_client.text
        new_workflow=popup.content.new_name.text
        self.root.get_screen('keyactiongroup').pop_up.dismiss()
        #Copy the current workflow into a new workflow
        #Check if the new workflow already exists
        wf = session.query(Workflow).join(TestScript).join(Project).join(Client).\
            filter(TestScript.name == self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
            filter(Client.name==self.root.get_screen('workflow').current_client).filter(Workflow.name==new_workflow).all()
        if len(wf)==0:
            # Target workflow does not exist yet: create it under the
            # currently selected test script.
            ts = session.query(TestScript).join(Project).join(Client).\
                filter(TestScript.name == self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
                filter(Client.name==self.root.get_screen('workflow').current_client).all()
            script = ts[0]
            flow = Workflow(name=new_workflow, testscriptid=script.id)
            session.add(flow)
            session.commit()
        else:
            flow = wf[0]
        #Copy the workflow actions
        actions = session.query(KeyAction).join(WorkflowAction).join(Workflow).\
            join(TestScript).join(Project).join(Client).filter(Workflow.name==current_workflow).\
            filter(TestScript.name == self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
            filter(Client.name==self.root.get_screen('workflow').current_client).all()
        for action in actions:
            # NOTE(review): this lookup joins Workflow but does not filter by
            # the source workflow's name, so it may pick up a WorkflowAction
            # from a different workflow with the same key action — confirm.
            wfa = session.query(WorkflowAction).join(KeyAction).join(Workflow).join(TestScript).\
                join(Project).join(Client).filter(KeyAction.name==action.name).\
                filter(TestScript.name == self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
                filter(Client.name==self.root.get_screen('workflow').current_client).all()
            flowaction = wfa[0]
            # Collect the names of the action's input parameters so they can
            # be re-saved against the new workflow.
            ips = session.query(InputParameter).join(WorkflowParameter).join(WorkflowAction).filter(WorkflowAction.id==flowaction.id).all()
            ip_value_list = []
            for ip in ips:
                ip_value_list.append(ip.name)
            writer.SaveWorkflowAction(action.name, flow.name, flowaction.expectedresult,\
                ip_value_list, self.root.get_screen('workflow').current_script,\
                self.root.get_screen('workflow').current_project, self.root.get_screen('workflow').current_client)
        #Clear the current elements in the UI
        self.ClearWorkflow()
        #Load the Key Actions from the new subflow into the editor
        keyactions = session.query(KeyAction).join(WorkflowAction).\
            join(Workflow).join(TestScript).join(Project).join(Client).\
            filter(Workflow.name==new_workflow).filter(TestScript.name == self.root.get_screen('workflow').current_script).\
            filter(Project.name==self.root.get_screen('workflow').current_project).filter(Client.name==self.root.get_screen('workflow').current_client).all()
        #Put each element into the draggable list
        for action in keyactions:
            lbl = Label(text=action.name)
            drag_option = DraggableOption(img=lbl, app=self,\
                grid=self.root.get_screen('workflow').drag_grid,\
                grid_layout=self.root.get_screen('workflow').grid_layout,\
                float_layout=self.root.get_screen('workflow').float_layout)
            self.root.get_screen('workflow').grid_layout.add_widget(drag_option)
        # NOTE(review): the popup was already dismissed above; this second
        # dismiss looks redundant — confirm it is harmless.
        self.root.get_screen('keyactiongroup').pop_up.dismiss()
        self.root.get_screen('workflow').ids.current_wf.text = new_workflow
        self.root.get_screen('workflow').current_workflowname = new_workflow
        self.root.get_screen('workflow').current_wf.text = new_workflow
def SaveProductPanel(self, *args):
Logger.debug('Save Product Panel')
if self.root.get_screen('keyactiongroup').pop_up.content.ka_prodpanel.product_spinner.text != '' or self.root.get_screen('keyactiongroup').pop_up.content.ka_prodpanel.product_spinner.text is None:
self.root.get_screen('keyactiongroup').current_product = self.root.get_screen('keyactiongroup').pop_up.content.ka_prodpanel.product_spinner.text
Logger.debug('Current Product set from spinner')
else:
prod_name = self.root.get_screen('keyactiongroup').pop_up.content.ka_prodpanel.product_text.text
if len(prod_name) > 1 or prod_name[0].isupper():
prod_rows = session.query(Product).filter(Product.name == prod_name).all()
if prod_rows is None or len(prod_rows) == 0:
prod = Product(name=prod_name)
session.add(prod)
session.commit()
fprod = prod.name
else:
fprod = prod_rows[0].name
self.root.get_screen('keyactiongroup').current_product = fprod
else:
lbl = Label(text='%s is not long enough or not capitalized' % (prod_name))
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
er_popup.open()
self.root.get_screen('keyactiongroup').pop_up.dismiss()
    def AddAndNode(self, *args):
        """Insert an 'AND' logic node into the current workflow.

        Adds a draggable 'AND' option to the key-action list and records a
        matching KeyAction/WorkflowAction pair in the database, reusing the
        shared 'AND' key action when it already exists.
        """
        Logger.debug('WF: Add And Node')
        current_workflow=self.root.get_screen('workflow').current_workflowname
        #--UI--
        #Create a Label
        lbl = Label(text='AND')
        #Create an Add Option in the Draggable List
        drag_option = DraggableOption(img=lbl, app=self,\
            grid=self.root.get_screen('workflow').drag_grid,\
            grid_layout=self.root.get_screen('workflow').grid_layout,\
            float_layout=self.root.get_screen('workflow').float_layout)
        self.root.get_screen('workflow').grid_layout.add_widget(drag_option)
        #--DB--
        #Find a key action
        ka = session.query(KeyAction).filter(KeyAction.name=='AND').all()
        #Find the workflow
        # NOTE(review): .one() raises if zero or multiple workflows match —
        # confirm a workflow is always loaded before this is callable.
        wf = session.query(Workflow).join(TestScript).join(Project).join(Client).\
            filter(Workflow.name==current_workflow).filter(TestScript.name == self.root.get_screen('workflow').current_script).\
            filter(Project.name==self.root.get_screen('workflow').current_project).filter(Client.name==self.root.get_screen('workflow').current_client).one()
        # Create the shared 'AND' key action only on first use.
        if len(ka) == 0:
            keyaction = KeyAction(name='AND')
            session.add(keyaction)
            session.commit()
        else:
            keyaction = ka[0]
        #Add the workflow action
        wfa = WorkflowAction(keyactionid=keyaction.id, workflowid=wf.id)
        session.add(wfa)
        session.commit()
def AddOrNode(self, *args):
Logger.debug('WF: Add Or Node')
current_workflow=self.root.get_screen('workflow').current_workflowname
#--UI--
#Create a Label
lbl = Label(text='OR')
#Create an Or Option in the Draggable List
drag_option = DraggableOption(img=lbl, app=self,\
grid=self.root.get_screen('workflow').drag_grid,\
grid_layout=self.root.get_screen('workflow').grid_layout,\
float_layout=self.root.get_screen('workflow').float_layout)
self.root.get_screen('workflow').grid_layout.add_widget(drag_option)
#--DB--
#Find a key action
ka = session.query(KeyAction).filter(KeyAction.name=='OR').all()
#Find the workflow
wf = session.query(Workflow).join(TestScript).join(Project).join(Client).\
filter(Workflow.name==current_workflow).filter(TestScript.name == self.root.get_screen('workflow').current_script).\
filter(Project.name==self.root.get_screen('workflow').current_project).filter(Client.name==self.root.get_screen('workflow').current_client).one()
if len(ka) == 0:
keyaction = KeyAction(name='OR')
session.add(keyaction)
session.commit()
else:
keyaction = ka[0]
#Add the workflow action
wfa = WorkflowAction(keyactionid=keyaction.id, workflowid=wf.id)
session.add(wfa)
session.commit()
def ShowForPopup(self, *args):
Logger.debug('WF: Show For Popup')
current_workflow=self.root.get_screen('workflow').current_workflowname
popup = Popup(title='For-In', content=ForInPopup(), size_hint=(0.5, 0.4))
self.root.get_screen('workflow').pop_up = popup
popup.open()
#Load The Key Actions
keyactions = session.query(KeyAction).join(WorkflowAction).join(Workflow).filter(Workflow.name==current_workflow).all()
for action in keyactions:
popup.content.keyaction_spinner.values.append(action.name)
popup.content.endaction_spinner.values.append(action.name)
    def AddForNode(self, *args):
        """Insert a 'FOR' loop node into the current workflow.

        Adds a draggable 'FOR' option to the key-action list, records a
        KeyAction/WorkflowAction pair, and stores the loop's 'In' collection
        and 'Final Key Action' values (read from the For-In popup) as
        workflow parameters.
        """
        Logger.debug('WF: Add For Node')
        popup=self.root.get_screen('workflow').pop_up
        current_workflow=self.root.get_screen('workflow').current_workflowname
        #--UI--
        #Create a Label
        lbl = Label(text='FOR')
        #Create an Add Option in the Draggable List
        drag_option = DraggableOption(img=lbl, app=self,\
            grid=self.root.get_screen('workflow').drag_grid,\
            grid_layout=self.root.get_screen('workflow').grid_layout,\
            float_layout=self.root.get_screen('workflow').float_layout)
        self.root.get_screen('workflow').grid_layout.add_widget(drag_option)
        #--DB--
        #Find a key action
        ka = session.query(KeyAction).filter(KeyAction.name=='FOR').all()
        #Find the workflow
        # NOTE(review): .one() raises if zero or multiple workflows match.
        wf = session.query(Workflow).join(TestScript).join(Project).join(Client).\
            filter(Workflow.name==current_workflow).filter(TestScript.name == self.root.get_screen('workflow').current_script).\
            filter(Project.name==self.root.get_screen('workflow').current_project).filter(Client.name==self.root.get_screen('workflow').current_client).one()
        # Create the shared 'FOR' key action only on first use.
        if len(ka) == 0:
            keyaction = KeyAction(name='FOR')
            session.add(keyaction)
            session.commit()
        else:
            keyaction = ka[0]
        #Add the workflow action
        wfa = WorkflowAction(keyactionid=keyaction.id, workflowid=wf.id)
        session.add(wfa)
        session.commit()
        #Add an input parameter
        # NOTE(review): new 'In'/'Final Key Action' InputParameter rows are
        # created on EVERY call, even when the 'FOR' key action already has
        # them — this looks like it accumulates duplicates; confirm.
        ip = InputParameter(keyactionid=keyaction.id, name='In')
        session.add(ip)
        ip2 = InputParameter(keyactionid=keyaction.id, name='Final Key Action')
        session.add(ip2)
        session.commit()
        # Bind the popup's loop values to this workflow action instance.
        wp = WorkflowParameter(inputparamid=ip.id, keyactionid=wfa.id, value=popup.content.in_textinput.text)
        session.add(wp)
        wp = WorkflowParameter(inputparamid=ip2.id, keyactionid=wfa.id, value=popup.content.endaction_spinner.text)
        session.add(wp)
        session.commit()
        popup.dismiss()
def UpdateIPSpinner(self, *args):
Logger.debug('WF: Update IP Spinner')
current_workflow=self.root.get_screen('workflow').current_wf.text
popup=self.root.get_screen('workflow').pop_up
#Clear the IP Spinner
del popup.content.inputparameter_spinner.values[:]
ips = session.query(InputParameter).join(KeyAction).filter(KeyAction.name==popup.content.keyaction_spinner.text).all()
for ip in ips:
popup.content.inputparameter_spinner.values.append(ip.name)
def AdvancedOptionsPopup_WF(self, *args):
Logger.debug('WF: Advanced Options Popup')
popup = Popup(title='Export Options', content=ExportPopup(), size_hint=(0.5, 0.75))
self.root.get_screen('workflow').pop_up = popup
popup.open()
def WFQuickActionPopup(self, *args):
Logger.debug('WF: Quick Action Popup')
popup = Popup(title='Quick Key Action', content=KeyActionPopup(app=self), size_hint=(0.5, 0.75))
self.root.get_screen('workflow').pop_up = popup
#popup.bind(on_dismiss=self.WFSaveQuickActionPopup)
popup.open()
def WFSaveQuickActionPopup(self, *args):
Logger.debug('WF: Save Action Popup')
popup = self.root.get_screen('workflow').pop_up
#Custom
if popup.content.custom_in.active:
cust = True
else:
cust = False
mod_text = popup.content.module_in.text
sa_text = popup.content.sa_in.text
ka_text = popup.content.ka_in.text
desc_text = popup.content.desc_in.text
prod = self.root.get_screen('keyactiongroup').current_product
if len(mod_text) < 2:
lbl = Label(text='Module Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(sa_text) < 2:
lbl = Label(text='System Area Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(ka_text) < 2:
lbl = Label(text='Key Action Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(desc_text) < 2:
lbl = Label(text='Description Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
#Save Key Action
writer.SaveKeyAction(prod, mod_text, sa_text, ka_text, desc_text, cust, popup.content.ips)
#Add to workflow
ip = []
writer.SaveWorkflowAction(popup.content.ka_in.text, self.root.get_screen('workflow').current_wf.text, '', ip, self.root.get_screen('workflow').current_script,\
self.root.get_screen('workflow').current_project, self.root.get_screen('workflow').current_client)
#Add node in list
lbl = Label(text=popup.content.ka_in.text)
drag_option = DraggableOption(img=lbl, app=self,\
grid=self.root.get_screen('workflow').drag_grid,\
grid_layout=self.root.get_screen('workflow').grid_layout,\
float_layout=self.root.get_screen('workflow').float_layout)
self.root.get_screen('workflow').grid_layout.add_widget(drag_option)
#Load the Test Script Popup
def TestScriptPopup_WF(self, *args):
Logger.debug('WF: Test Script Popup')
popup = Popup(title='Test Script Options', content=TestScriptOptionsPopup(app=self), size_hint=(0.5, 0.75))
self.root.get_screen('workflow').pop_up = popup
popup.open()
#Populate the currently selected values into the popup
if self.root.get_screen('workflow').current_client is not None:
popup.content.load_client.text = self.root.get_screen('workflow').current_client
if self.root.get_screen('workflow').current_project is not None:
popup.content.load_project.text = self.root.get_screen('workflow').current_project
if self.root.get_screen('workflow').current_script is not None:
popup.content.load_testscript.text = self.root.get_screen('workflow').current_script
popup.content.load_client.values.append('')
popup.content.load_project.values.append('')
popup.content.load_testscript.values.append('')
#Populate the Spinners
clients = session.query(Client).all()
for client in clients:
popup.content.load_client.values.append(client.name)
projects = session.query(Project).all()
for project in projects:
popup.content.load_project.values.append(project.name)
scripts = session.query(TestScript).all()
for script in scripts:
popup.content.load_testscript.values.append(script.name)
#Update the project and test script spinners in the test script popup
def UpdateProjectAndTestScript(self, *args):
Logger.debug('WF: Test Script Popup')
popup = self.root.get_screen('workflow').pop_up
#Clear the spinners
del popup.content.load_project.values[:]
del popup.content.load_testscript.values[:]
#Query based on the updated client
projects = session.query(Project).join(Client).filter(Client.name == popup.content.load_client.text).all()
for project in projects:
popup.content.load_project.values.append(project.name)
scripts = session.query(TestScript).join(Project).join(Client).\
filter(Client.name == popup.content.load_client.text).all()
for script in scripts:
popup.content.load_testscript.values.append(script.name)
#Update the test script spinner in the test script popup
def UpdateTestScript(self, *args):
Logger.debug('WF: Test Script Popup')
popup = self.root.get_screen('workflow').pop_up
del popup.content.load_testscript.values[:]
scripts = session.query(TestScript).join(Project).join(Client).\
filter(Client.name == popup.content.load_client.text).\
filter(Project.name==popup.content.load_project.text).all()
for script in scripts:
popup.content.load_testscript.values.append(script.name)
    def SaveTestScriptPopup(self, *args):
        """Create or load the client/project/test-script context chosen in
        the test script popup, then clear the workflow editor and set the
        current script/project/client on the workflow screen.

        New values are taken from the `new_*` text inputs; existing ones
        from the `load_*` spinners.  Each new name must be at least 2
        characters, otherwise an error popup is shown and True is returned.
        """
        Logger.debug('WF: Save Test Script Popup')
        popup = self.root.get_screen('workflow').pop_up
        cl_text = popup.content.new_client.text
        pr_text = popup.content.new_project.text
        ts_text = popup.content.new_testscript.text
        #Validations
        if (popup.content.new_client.text is not None and popup.content.new_client.text != ""):
            if len(cl_text) < 2:
                lbl = Label(text='Client name is not long enough')
                er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
                er_popup.open()
                return True
        if (popup.content.new_project.text is not None and popup.content.new_project.text != ""):
            if len(pr_text) < 2:
                lbl = Label(text='Project name is not long enough')
                er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
                er_popup.open()
                return True
        if (popup.content.new_testscript.text is not None and popup.content.new_testscript.text != ""):
            if len(ts_text) < 2:
                lbl = Label(text='Test Script name is not long enough')
                er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
                er_popup.open()
                return True
        #If-Else Block to determine whether we're creating new values or using
        #old ones, or a combination of the two
        #New client, project, and test script
        if (popup.content.new_client.text is not None and popup.content.new_client.text != "")\
        and (popup.content.new_project.text is not None and popup.content.new_project.text != "")\
        and (popup.content.new_testscript.text is not None and popup.content.new_testscript.text != ""):
            Logger.debug('WF: Save Test Script Popup - New Client, Project & Test Script')
            client = Client(name=popup.content.new_client.text)
            session.add(client)
            session.commit()
            project = Project(name=popup.content.new_project.text, clientid=client.id)
            session.add(project)
            session.commit()
            script = TestScript(name=popup.content.new_testscript.text, projectid=project.id)
            session.add(script)
            session.commit()
        #New client and project
        elif (popup.content.new_client.text is not None and popup.content.new_client.text != "")\
        and (popup.content.new_project.text is not None and popup.content.new_project.text != ""):
            Logger.debug('WF: Save Test Script Popup - New Client & Project')
            #Invalid
            # NOTE(review): this branch (and the two 'Invalid' branches
            # below) assigns none of client/project/script, so the code
            # after the chain raises NameError — confirm whether these
            # combinations should instead show an error popup and return.
        #new project and test script
        elif (popup.content.new_project.text is not None and popup.content.new_project.text != "")\
        and (popup.content.new_testscript.text is not None and popup.content.new_testscript.text != ""):
            Logger.debug('WF: Save Test Script Popup - New Project & Test Script')
            cl = session.query(Client).filter(Client.name==popup.content.load_client.text).all()
            client = cl[0]
            project = Project(name=popup.content.new_project.text, clientid=client.id)
            session.add(project)
            session.commit()
            script = TestScript(name=popup.content.new_testscript.text, projectid=project.id)
            session.add(script)
            session.commit()
        #New client
        elif (popup.content.new_client.text is not None and popup.content.new_client.text != ""):
            Logger.debug('WF: Save Test Script Popup - New Client')
            #Invalid
        #New Project
        elif (popup.content.new_project.text is not None and popup.content.new_project.text != ""):
            Logger.debug('WF: Save Test Script Popup - New Project')
            #invalid
        #New Test Script
        elif (popup.content.new_testscript.text is not None and popup.content.new_testscript.text != ""):
            Logger.debug('WF: Save Test Script Popup - New Test Script')
            cl = session.query(Client).filter(Client.name==popup.content.load_client.text).all()
            client = cl[0]
            pj = session.query(Project).join(Client).filter(Project.name==popup.content.load_project.text).\
                filter(Client.name==popup.content.load_client.text).all()
            project = pj[0]
            script = TestScript(name=popup.content.new_testscript.text, projectid=project.id)
            session.add(script)
            session.commit()
        #Load All From DB
        else:
            Logger.debug('WF: Save Test Script Popup - Existing Client, Project, Test Script')
            cl = session.query(Client).filter(Client.name==popup.content.load_client.text).all()
            client = cl[0]
            pj = session.query(Project).join(Client).filter(Project.name==popup.content.load_project.text).\
                filter(Client.name==popup.content.load_client.text).all()
            project = pj[0]
            sc = session.query(TestScript).join(Project).join(Client).\
                filter(TestScript.name==popup.content.load_testscript.text).\
                filter(Project.name==popup.content.load_project.text).\
                filter(Client.name==popup.content.load_client.text).all()
            script = sc[0]
        #Clear the current elements in the UI
        self.ClearWorkflow()
        Logger.debug('Setting current variables: Script - %s; Project - %s; Client - %s' % (script.name, project.name, client.name))
        #Assign the current script
        self.root.get_screen('workflow').current_script = script.name
        self.root.get_screen('workflow').current_project = project.name
        self.root.get_screen('workflow').current_client = client.name
def UpdateWorkflowName(self, *args):
#When Enter is pressed on the current workflow text input, update the workflow name
Logger.debug('WF: Update Workflow Name')
wf = session.query(Workflow).join(TestScript).join(Project).join(Client).\
filter(Workflow.name==self.root.get_screen('workflow').current_workflowname).\
filter(TestScript.name == self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
filter(Client.name==self.root.get_screen('workflow').current_client).all()
flow = wf[0]
flow.name = self.root.get_screen('workflow').current_wf.text
session.commit()
def SaveWorkflow(self, *args):
Logger.debug('WF: Save Workflow')
writer.SaveConnectionsList(self.root.get_screen('workflow').drag_grid.connections,\
self.root.get_screen('workflow').current_workflowname, self.root.get_screen('workflow').current_script,\
self.root.get_screen('workflow').current_project, self.root.get_screen('workflow').current_client)
writer.SaveFlowchart(self.root.get_screen('workflow').drag_grid.nodes, self.root.get_screen('workflow').current_script,\
self.root.get_screen('workflow').current_project, self.root.get_screen('workflow').current_client, self.root.get_screen('workflow').current_workflowname)
def SaveAction(self, *args):
Logger.debug('WF: Save Action')
#Pull side editor values
action_name = self.root.get_screen('workflow').ids.wf_carousel.name_in.text
flow_name = self.root.get_screen('workflow').current_workflowname
expected_results = self.root.get_screen('workflow').ids.wf_carousel.er_in.text
ip_value_list = []
for child in self.root.get_screen('workflow').ids.wf_carousel.ipgrid_in.children:
ip_value_list.append(child.text)
Logger.debug('%s appended to ip value list' % (child.text))
#Write values to the DB
writer.SaveWorkflowAction(action_name, flow_name, expected_results, ip_value_list, self.root.get_screen('workflow').current_script,\
self.root.get_screen('workflow').current_project, self.root.get_screen('workflow').current_client)
#This is a critical method as it is called when a draggable is released on
#the flowchart, to add a flowchart node. This takes the label from the original
#Draggable, puts it into a new draggable wrapper and then into the flowchart node
def add_flowchart_node(self, cell, image):
Logger.debug('Add flowchart node with image %s and cell %s' % (image, cell))
drag_label = DraggableImage(img=image, app=self, grid=self.root.get_screen('workflow').drag_grid,\
cell=cell, grid_layout=self.root.get_screen('workflow').grid_layout,\
float_layout=self.root.get_screen('workflow').float_layout)
drag = FlowChartNode(app=self, grid=self.root.get_screen('workflow').drag_grid, cell=cell, label=drag_label)
drag_label.node = drag
#Bind the double press to load the key action into the side editor
drag_label.bind(on_double_press=self.LoadSideEditor)
cell.add_widget(drag)
cell.nodes.append(drag)
self.root.get_screen('workflow').drag_grid.nodes.append(drag)
def add_draggable_node(self, image):
Logger.debug('Add draggable option to list')
drag_option = DraggableOption(img=image, app=self,\
grid=self.root.get_screen('workflow').drag_grid,\
grid_layout=self.root.get_screen('workflow').grid_layout,\
float_layout=self.root.get_screen('workflow').float_layout)
self.root.get_screen('workflow').grid_layout.add_widget(drag_option)
    def LoadSideEditor(self, node):
        """Load the key action behind the double-pressed flowchart *node*
        into the side editor: its name, expected result, and one TextInput
        per input parameter (pre-filled with any saved workflow value).
        """
        #Loop through the nodes in the grid and find the one that has been double pressed
        Logger.debug('Load Side Editor with action %s' % (node.img.text))
        #for node in self.root.get_screen('workflow').drag_grid.nodes:
            #if node.label.is_double_pressed:
        #Clear the elements of the side editor
        self.root.get_screen('workflow').ids.wf_carousel.er_in.text = ''
        self.root.get_screen('workflow').ids.wf_carousel.ipgrid_in.clear_widgets()
        #Query the DB for the details of the action with the name from the label
        # NOTE(review): .one() raises if the key action name is missing or
        # duplicated — confirm node labels always map to exactly one row.
        ka = session.query(KeyAction).filter(KeyAction.name==node.img.text).one()
        ips = session.query(InputParameter).join(KeyAction).filter(KeyAction.name == node.img.text).all()
        w = session.query(WorkflowAction).join(Workflow).join(TestScript).join(Project).join(Client).\
            join(KeyAction).filter(KeyAction.name == node.img.text).\
            filter(Workflow.name==self.root.get_screen('workflow').current_workflowname).\
            filter(TestScript.name == self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
            filter(Client.name==self.root.get_screen('workflow').current_client).all()
        flow = session.query(Workflow).join(TestScript).join(Project).join(Client).\
            filter(Workflow.name==self.root.get_screen('workflow').current_workflowname).\
            filter(TestScript.name == self.root.get_screen('workflow').current_script).filter(Project.name==self.root.get_screen('workflow').current_project).\
            filter(Client.name==self.root.get_screen('workflow').current_client).all()
        wfa = w[0]
        #Load the double clicked node into the side editor
        self.root.get_screen('workflow').ids.wf_carousel.name = node.img.text
        if wfa.expectedresult is not None:
            self.root.get_screen('workflow').ids.wf_carousel.er_in.text = wfa.expectedresult
        #Load the input parameters
        for ip in ips:
            # Look up a saved value for this parameter on this workflow action.
            wp = session.query(WorkflowParameter).filter(WorkflowParameter.inputparamid == ip.id).\
                filter(WorkflowParameter.keyactionid ==w[0].id).all()
            lbl = TextInput(hint_text=ip.name)
            # NOTE(review): wp[0].value may be None (nullable column?) which
            # would be assigned to TextInput.text — confirm it is always set.
            if len(wp) != 0:
                lbl.text = wp[0].value
            self.root.get_screen('workflow').ids.wf_carousel.ipgrid_in.add_widget(lbl)
def LoadFilterWorkflows(self, workflow, testscript, client, project, spinner):
#Load the filter values into the relevent spinner
Logger.debug('Load Popup Filter Workflows')
#Clear the spinner
del spinner.values[:]
#Get Result Set from Filter Manager
num_flows = session.query(Workflow).count()
if num_flows - popup_filter_limit < 1:
num_flows = 0
else:
num_flows = num_flows - popup_filter_limit
results = filter.FindWorkflows(workflow, testscript, client, project, num_flows, num_flows+popup_filter_limit)
#Load Result Set Into Spinner
for result in results:
Logger.debug('Result appended %s' % (result.name))
spinner.values.append(result.name)
def LoadFilterTestscripts(self, workflow, testscript, client, project, spinner):
#Load the filter values into the relevent spinner
Logger.debug('Load Popup Filter Test Scripts')
#Clear the spinner
del spinner.values[:]
#Get Result set from Filter Manager
num_scripts = session.query(TestScript).count()
if num_scripts - popup_filter_limit < 0:
num_scripts = 0
else:
num_scripts = num_scripts - popup_filter_limit
scripts = filter.FindTestScripts(workflow, testscript, client, project, num_scripts, num_scripts+popup_filter_limit)
#Load Result Set Into Spinner
for script in scripts:
Logger.debug('Script appended %s' % (script.name))
spinner.values.append(script.name)
def ApplyLoadWorkflowPopupFilter(self, *args):
Logger.debug('Apply load workflow filter popup')
#Get Filter Values
wf = ''
ts = self.root.get_screen('keyactiongroup').pop_up.content.lwp_testscript.text
cl = self.root.get_screen('keyactiongroup').pop_up.content.lwp_client.text
pr = self.root.get_screen('keyactiongroup').pop_up.content.lwp_project.text
#Define spinners
workflow_spinner = self.root.get_screen('keyactiongroup').pop_up.content.spinner
testscript_spinner = self.root.get_screen('keyactiongroup').pop_up.content.lwp_testscript
#Load the popups
self.LoadFilterWorkflows(wf, ts, cl, pr, workflow_spinner)
self.LoadFilterTestscripts(wf, ts, cl, pr, testscript_spinner)
def ApplyLoadWorkflowPopupFilter_Script(self, *args):
Logger.debug('Apply load workflow script filter popup')
#Get Filter Values
wf = ''
ts = self.root.get_screen('keyactiongroup').pop_up.content.lwp_testscript.text
cl = self.root.get_screen('keyactiongroup').pop_up.content.lwp_client.text
pr = self.root.get_screen('keyactiongroup').pop_up.content.lwp_project.text
#Define spinner
testscript_spinner = self.root.get_screen('keyactiongroup').pop_up.content.lwp_testscript
#Load the popup
self.LoadFilterTestscripts(wf, ts, cl, pr, testscript_spinner)
    def LoadWorkflowPopup(self, *args):
        """Open the Load Workflow popup.

        Fills the client and project spinners with all rows, and the test
        script and workflow spinners with the newest `popup_filter_limit`
        rows of each table.
        """
        Logger.debug('WF: Load Workflow Popup')
        popup = Popup(title='Load Workflow', content=LoadWorkflowPopup(), size_hint=(0.4, 0.5))
        popup.open()
        self.root.get_screen('keyactiongroup').pop_up = popup
        #Populate all clients
        clients = session.query(Client).all()
        for client in clients:
            popup.content.lwp_client.values.append(client.name)
        #Populate all projects
        projects = session.query(Project).all()
        for project in projects:
            popup.content.lwp_project.values.append(project.name)
        #Populate the latest 5 test scripts into the spinner
        # Offset past all but the newest `popup_filter_limit` scripts.
        num_scripts = session.query(TestScript).count()
        if num_scripts - popup_filter_limit < 0:
            num_scripts = 0
        else:
            num_scripts = num_scripts - popup_filter_limit
        scripts = session.query(TestScript).order_by(TestScript.id)[num_scripts:num_scripts+popup_filter_limit]
        for script in scripts:
            popup.content.lwp_testscript.values.append(script.name)
        #Populate the latest 5 workflows into the spinner
        num_flows = session.query(Workflow).count()
        if num_flows - popup_filter_limit < 0:
            num_flows = 0
        else:
            num_flows = num_flows - popup_filter_limit
        results = session.query(Workflow).order_by(Workflow.id)[num_flows:num_flows+popup_filter_limit]
        #Populate values in spinner
        for result in results:
            popup.content.spinner.values.append(result.name)
    def LoadFlow(self, *args):
        """Load the workflow selected in the load-workflow popup into the editor.

        Clears the editor, rebuilds the flowchart from saved node positions
        and next-action connections, puts key actions without saved positions
        into the draggable list, and sets the current workflow/script/project/
        client on the workflow screen.
        """
        Logger.debug('Add To Workflow')
        ts = self.root.get_screen('keyactiongroup').pop_up.content.lwp_testscript.text
        cl = self.root.get_screen('keyactiongroup').pop_up.content.lwp_client.text
        pr = self.root.get_screen('keyactiongroup').pop_up.content.lwp_project.text
        current_workflow=self.root.get_screen('keyactiongroup').pop_up.content.spinner.text
        #Clear the current elements in the UI
        self.ClearWorkflow()
        # Key actions that have no saved flowchart position end up here and
        # are shown in the draggable list instead of on the grid.
        ka_list = []
        #Load the Key Actions for the flow
        keyactions = session.query(KeyAction).join(WorkflowAction).\
            join(Workflow).join(TestScript).join(Project).join(Client).filter(Workflow.name==current_workflow).\
            filter(TestScript.name==ts).filter(Project.name==pr).filter(Client.name==cl).all()
        #Load the Key Actions for the flowchart
        flowchart_actions = session.query(KeyAction.name, FlowchartPosition.col, FlowchartPosition.row).select_from(FlowchartPosition).\
            join(WorkflowAction).join(Workflow).join(KeyAction).join(TestScript).join(Project).join(Client).\
            filter(Workflow.name==current_workflow).filter(TestScript.name==ts).\
            filter(Project.name==pr).filter(Client.name==cl).all()
        if len(flowchart_actions) != 0:
            #Load the Next Key Actions for the flowchart
            next_actions = session.query(WorkflowNextAction).join(WorkflowAction).\
                join(Workflow).join(TestScript).join(Project).join(Client).filter(Workflow.name==current_workflow).\
                filter(TestScript.name==ts).filter(Project.name==pr).filter(Client.name==cl).all()
            #Identify the elements in the keyactions list that aren't in the flowchart_actions list
            # NOTE(review): matching is by key-action name, so duplicate
            # names within a workflow would be conflated — confirm names
            # are unique per workflow.
            for action in keyactions:
                match=False
                for node in flowchart_actions:
                    if action.name == node.name:
                        match=True
                if match==False:
                    ka_list.append(action)
            #Populate the flowchart
            #Nodes
            for node in flowchart_actions:
                image = Label(text=node.name)
                self.add_flowchart_node(self.root.get_screen('workflow').drag_grid.get_cell(node.row, node.col), image)
            #Connections
            for action in next_actions:
                # Resolve both endpoints of the saved connection back to
                # key actions, then wire up the matching grid nodes.
                ka1 = session.query(KeyAction).join(WorkflowAction).filter(WorkflowAction.id == action.keyactionid).all()
                ka2 = session.query(KeyAction).join(WorkflowAction).filter(WorkflowAction.id == action.nextactionid).all()
                for node in self.root.get_screen('workflow').drag_grid.nodes:
                    #Add connections to the grid
                    if len(ka1) > 0:
                        if ka1[0].name == node.label.img.text:
                            #Find the connected node
                            for node2 in self.root.get_screen('workflow').drag_grid.nodes:
                                if len(ka2) > 0:
                                    if ka2[0].name == node2.label.img.text:
                                        connected_node = node2
                                        connector = Connector(line_color=node.connector.connector_color)
                                        node.connector.connections.append(connector)
                                        node.connections.append(connected_node)
                                        node.grid.connections[0].append(node)
                                        node.grid.connections[1].append(connected_node)
        else:
            # No saved positions at all: every key action goes to the list.
            for action in keyactions:
                ka_list.append(action)
        #Put each remaining element into the draggable list
        for action in ka_list:
            lbl = Label(text=action.name)
            drag_option = DraggableOption(img=lbl, app=self,\
                grid=self.root.get_screen('workflow').drag_grid,\
                grid_layout=self.root.get_screen('workflow').grid_layout,\
                float_layout=self.root.get_screen('workflow').float_layout)
            self.root.get_screen('workflow').grid_layout.add_widget(drag_option)
        self.root.get_screen('keyactiongroup').pop_up.dismiss()
        # Record the newly loaded context on the workflow screen.
        self.root.get_screen('workflow').ids.current_wf.text = current_workflow
        self.root.get_screen('workflow').current_workflowname = current_workflow
        self.root.get_screen('workflow').current_wf.text = current_workflow
        self.root.get_screen('workflow').current_script = ts
        self.root.get_screen('workflow').current_project = pr
        self.root.get_screen('workflow').current_client = cl
#----------------------------------------------------------
#-------------------Key Action Page Callbacks--------------
#----------------------------------------------------------
def AddInputParamToGrid(self, *args):
Logger.debug('Add Input Parameter To Grid')
ip_input = TextInput(hint_text='Input Parameter')
self.root.get_screen('keyactiongroup').ids.carousel_ka.current_slide.ipgrid_in.add_widget(ip_input)
self.root.get_screen('keyactiongroup').ids.carousel_ka.current_slide.iplist.append(ip_input)
def CreateFlow(self, *args):
Logger.debug('Create New Flow')
new_script=self.root.get_screen('keyactiongroup').pop_up.content.spinner.text
new_project=self.root.get_screen('keyactiongroup').pop_up.content.cwp_project.text
new_client=self.root.get_screen('keyactiongroup').pop_up.content.cwp_client.text
test_script=session.query(TestScript).join(Project).join(Client).filter(TestScript.name==new_script).\
filter(Project.name == new_project).filter(Client.name == new_client).one()
workflow = Workflow(testscriptid=test_script.id, name=self.root.get_screen('keyactiongroup').pop_up.content.new_flow.text)
session.add(workflow)
session.commit()
for option in selected_ids:
keyaction = session.query(KeyAction).filter(KeyAction.id==option).one()
wfa = WorkflowAction(workflowid=workflow.id, keyactionid=keyaction.id)
session.add(wfa)
session.commit()
self.root.get_screen('keyactiongroup').pop_up.dismiss()
def AddToFlow(self, *args):
Logger.debug('Add To Workflow')
new_flow=self.root.get_screen('keyactiongroup').pop_up.content.spinner.text
new_script=self.root.get_screen('keyactiongroup').pop_up.content.atwp_testscript.text
new_project=self.root.get_screen('keyactiongroup').pop_up.content.atwp_project.text
new_client=self.root.get_screen('keyactiongroup').pop_up.content.atwp_client.text
workflows = session.query(Workflow).join(TestScript).join(Project).join(Client).\
filter(Workflow.name==new_flow).filter(TestScript.name==new_script).\
filter(Project.name == new_project).filter(Client.name == new_client).all()
if len(workflows) < 1:
lbl = Label(text='No Workflow Found')
popup = Popup(title='Error', content=lbl, size_hint=(0.4, 0.5))
popup.open()
else:
for option in selected:
keyaction = session.query(KeyAction).filter(KeyAction.name==option).one()
wfa = WorkflowAction(workflowid=workflows[0].id, keyactionid=keyaction.id)
session.add(wfa)
session.commit()
def ApplyWorkflowPopupFilter(self, *args):
Logger.debug('Apply workflow filter popup')
workflow_spinner = self.root.get_screen('keyactiongroup').pop_up.content.spinner
#Get Filter Values
wf = ''
ts = self.root.get_screen('keyactiongroup').pop_up.content.atwp_testscript.text
cl = self.root.get_screen('keyactiongroup').pop_up.content.atwp_client.text
pr = self.root.get_screen('keyactiongroup').pop_up.content.atwp_project.text
self.LoadFilterWorkflows(wf, ts, cl, pr, workflow_spinner)
def ApplyWorkflowPopupFilter_Script(self, *args):
Logger.debug('Apply workflow script filter popup')
testscript_spinner = self.root.get_screen('keyactiongroup').pop_up.content.atwp_testscript
workflow_spinner = self.root.get_screen('keyactiongroup').pop_up.content.spinner
#Get Filter Values
wf = ''
ts = self.root.get_screen('keyactiongroup').pop_up.content.atwp_testscript.text
cl = self.root.get_screen('keyactiongroup').pop_up.content.atwp_client.text
pr = self.root.get_screen('keyactiongroup').pop_up.content.atwp_project.text
self.LoadFilterWorkflows(wf, ts, cl, pr, workflow_spinner)
self.LoadFilterTestscripts(wf, ts, cl, pr, testscript_spinner)
def ApplyCreateWorkflowPopupFilterI(self, *args):
Logger.debug('Apply create workflow filter popup I')
#Clear the Spinner
del self.root.get_screen('keyactiongroup').pop_up.content.spinner.values[:]
#Get Filter Values
wf = ''
ts = ''
cl = self.root.get_screen('keyactiongroup').pop_up.content.cwp_client.text
pr = self.root.get_screen('keyactiongroup').pop_up.content.cwp_project.text
#Get Result Set from Filter Manager
num_scripts = session.query(TestScript).count()
if num_scripts - popup_filter_limit < 0:
num_scripts = 0
else:
num_scripts = num_scripts - popup_filter_limit
results = filter.FindTestScripts(wf, ts, cl, pr, num_scripts, num_scripts + popup_filter_limit)
#Load Result Set Into Spinner
for result in results:
self.root.get_screen('keyactiongroup').pop_up.content.spinner.values.append(result.name)
def ApplyCreateWorkflowPopupFilterII(self, *args):
Logger.debug('Apply create workflow filter popup II')
#Clear the Spinner
del self.root.get_screen('keyactiongroup').pop_up.content.spinner.values[:]
#Get Filter Values
wf = ''
ts = ''
cl = self.root.get_screen('keyactiongroup').pop_up.content.cwp_client.text
pr = self.root.get_screen('keyactiongroup').pop_up.content.cwp_project.text
#Get Result Set from Filter Manager
num_scripts = session.query(TestScript).count()
if num_scripts - popup_filter_limit < 0:
num_scripts = 0
else:
num_scripts = num_scripts - popup_filter_limit
results = filter.FindTestScripts(wf, ts, cl, pr, num_scripts, num_scripts + popup_filter_limit)
#Load Result Set Into Spinner
for result in results:
self.root.get_screen('keyactiongroup').pop_up.content.spinner.values.append(result.name)
def AddToWorkflowPopup(self, *args):
Logger.debug('WF: Add to Workflow Popup')
popup = Popup(title='Add To Workflow', content=AddToWorkflowPopup(), size_hint=(0.4, 0.5))
popup.open()
self.root.get_screen('keyactiongroup').pop_up = popup
#Populate all clients
clients = session.query(Client).all()
for client in clients:
popup.content.atwp_client.values.append(client.name)
#Populate all projects
projects = session.query(Project).all()
for project in projects:
popup.content.atwp_project.values.append(project.name)
#Populate the latest 5 test scripts into the spinner
num_scripts = session.query(TestScript).count()
if num_scripts - popup_filter_limit < 0:
num_scripts = 0
else:
num_scripts = num_scripts - popup_filter_limit
scripts = session.query(TestScript).order_by(TestScript.id)[num_scripts:num_scripts+popup_filter_limit]
for script in scripts:
popup.content.atwp_testscript.values.append(script.name)
#Populate the latest 5 workflows into the spinner
num_flows = session.query(Workflow).count()
if num_flows - popup_filter_limit < 0:
num_flows = 0
else:
num_flows = num_flows - popup_filter_limit
results = session.query(Workflow).order_by(Workflow.id)[num_flows:num_flows+popup_filter_limit]
#Populate values in spinner
for result in results:
popup.content.spinner.values.append(result.name)
def CreateWorkflowPopup(self, *args):
Logger.debug('WF: Add to Workflow Popup')
popup = Popup(title='Create Workflow', content=CreateWorkflowPopup(), size_hint=(0.4, 0.5))
popup.open()
self.root.get_screen('keyactiongroup').pop_up = popup
#Populate all clients
clients = session.query(Client).all()
for client in clients:
popup.content.cwp_client.values.append(client.name)
#Populate all projects
projects = session.query(Project).all()
for project in projects:
popup.content.cwp_project.values.append(project.name)
#Get the latest 5 Test Scripts
num_scripts = session.query(TestScript).count()
Logger.debug('Num Scripts %s' % (num_scripts))
if num_scripts - popup_filter_limit < 0:
num_scripts = 0
else:
num_scripts = num_scripts - popup_filter_limit
results = session.query(TestScript).order_by(TestScript.id)[num_scripts:num_scripts+popup_filter_limit]
Logger.debug('Num Results %s' % (len(results)))
#Populate values in spinner
for result in results:
popup.content.spinner.values.append(result.name)
Logger.debug('Result %s appended' % (result.name))
#----------------------------------------------------------
#-------------------Filtering Methods----------------------
#Load the next page for the Key Action Group Screen
def LoadNextPageKAG(self, *args):
Logger.debug('Load Next KAG Filter Page')
self.root.get_screen('keyactiongroup').ids.selection_layout.clear_widgets()
mod = self.root.get_screen('keyactiongroup').ids.modulefilter_kag.text
sa = self.root.get_screen('keyactiongroup').ids.safilter_kag.text
ka = self.root.get_screen('keyactiongroup').ids.kafilter_kag.text
cust = self.root.get_screen('keyactiongroup').ids.customfilter.active
filter.setCustomFilteringEnabled(self.root.get_screen('keyactiongroup').ids.custswitch.active)
del selected_ids[:]
del selected[:]
results = filter.NextPage_KA(str(mod), str(sa), str(ka), str(cust), self.root.get_screen('keyactiongroup').current_product)
i = 0
for i in range(0, len(results)):
sel = SelectableButton(text=str(results[i].name), object_id=results[i].id)
self.root.get_screen('keyactiongroup').ids.selection_layout.add_widget(sel)
#Load the previous page for the Key Action Group Screen
def LoadPrevPageKAG(self, *args):
Logger.debug('Load Prev KAG Filter Page')
self.root.get_screen('keyactiongroup').ids.selection_layout.clear_widgets()
mod = self.root.get_screen('keyactiongroup').ids.modulefilter_kag.text
sa = self.root.get_screen('keyactiongroup').ids.safilter_kag.text
ka = self.root.get_screen('keyactiongroup').ids.kafilter_kag.text
cust = self.root.get_screen('keyactiongroup').ids.customfilter.active
filter.setCustomFilteringEnabled(self.root.get_screen('keyactiongroup').ids.custswitch.active)
del selected_ids[:]
del selected[:]
results = filter.PrevPage_KA(str(mod), str(sa), str(ka), str(cust), self.root.get_screen('keyactiongroup').current_product)
i = 0
for i in range(0, len(results)):
sel = SelectableButton(text=str(results[i].name), object_id=results[i].id)
self.root.get_screen('keyactiongroup').ids.selection_layout.add_widget(sel)
#Load the next page for the Key Action Group Screen
def ApplyFilterKAG(self, *args):
Logger.debug('Apply KAG Filter')
filter.FirstPage()
self.root.get_screen('keyactiongroup').ids.selection_layout.clear_widgets()
mod = self.root.get_screen('keyactiongroup').ids.modulefilter_kag.text
sa = self.root.get_screen('keyactiongroup').ids.safilter_kag.text
ka = self.root.get_screen('keyactiongroup').ids.kafilter_kag.text
cust = self.root.get_screen('keyactiongroup').ids.customfilter.active
filter.setCustomFilteringEnabled(self.root.get_screen('keyactiongroup').ids.custswitch.active)
del selected_ids[:]
del selected[:]
results = filter.ApplyFilter(str(mod), str(sa), str(ka), str(cust), str(self.root.get_screen('keyactiongroup').current_product))
i = 0
for i in range(0, len(results)):
sel = SelectableButton(text=str(results[i].name), object_id=results[i].id)
self.root.get_screen('keyactiongroup').ids.selection_layout.add_widget(sel)
#Clear the filter in t he Key Action Group Screen
def ClearFilterKAG(self, *args):
Logger.debug('Clear KAG Filter')
self.root.get_screen('keyactiongroup').ids.modulefilter_kag.text = ''
self.root.get_screen('keyactiongroup').ids.safilter_kag.text = ''
self.root.get_screen('keyactiongroup').ids.kafilter_kag.text = ''
self.root.get_screen('keyactiongroup').ids.custswitch.active = False
filter.setCustomFilteringEnabled(self.root.get_screen('keyactiongroup').ids.custswitch.active)
self.ApplyFilterKAG(args)
    def SetCustom(self, *args):
        """Stub callback for the custom toggle; currently only logs."""
        Logger.debug('Set Custom')
    def EnableCustomFiltering(self, *args):
        """Stub: intended to enable custom filtering; currently only logs."""
        # NOTE(review): same log text as DisableCustomFiltering — presumably
        # copy-paste; confirm the intended messages before relying on logs.
        Logger.debug('Set Custom Filtering')
    def DisableCustomFiltering(self, *args):
        """Stub: intended to disable custom filtering; currently only logs."""
        Logger.debug('Set Custom Filtering')
#----------------------------------------------------------
#-------------------Action Bar Methods---------------------
def AdvancedOptionsPopup_KAG(self, *args):
Logger.debug('Advanced Options')
popup = Popup(title='Advanced Options', content=KeyActionTabbedPanel(), size_hint=(0.75, 0.75))
self.root.get_screen('keyactiongroup').pop_up = popup
popup.open()
#Find the products in the db and populate the spinner
results = session.query(Product).all()
for result in results:
self.root.get_screen('keyactiongroup').pop_up.content.ka_prodpanel.product_spinner.values.append(result.name)
    def ImportKeyActions(self, *args):
        """Stub for importing key actions from an external source; currently only logs."""
        Logger.debug('DB Import')
    def ImportWorkflows(self, *args):
        """Stub for importing workflows from an external source; currently only logs."""
        Logger.debug('DB Import')
    def Quit(self, *args):
        """Stub for a graceful application shutdown; currently only logs."""
        Logger.debug('Graceful Exit')
    def GoToWorkflowPage(self, *args):
        """Switch the active screen to the workflow screen."""
        Logger.debug('Go To Workflow Page')
        # 'sm' is presumably the module-level screen manager — defined elsewhere in the file.
        sm.current='workflow'
    def GoToKeyActionGroupPage(self, *args):
        """Switch the active screen to the key action group screen."""
        Logger.debug('Go To Key Action Page')
        sm.current = 'keyactiongroup'
def DuplicateKeyAction(self, *args):
Logger.debug('Duplicate Key Action')
#Duplicate the selected key actions
self.root.get_screen('keyactiongroup').ids.carousel_ka.clear_widgets()
numSelected = len(selected)
if numSelected > 1:
for action in selected_ids:
#Create the Key Action Carousel Item
keyaction = KeyActionCarouselItem(app=self)
#Set the Module & System Area
sa_rows = session.query(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.sa_in.text = sa_rows[0].name
sysarea = sa_rows[0].name
mod_rows = session.query(Module).join(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.module_in.text = mod_rows[0].name
module = mod_rows[0].name
prod_rows = session.query(Product).filter(Product.name == self.root.get_screen('keyactiongroup').current_product)
product = prod_rows[0].name
rows = session.query(KeyAction).filter(KeyAction.id == action)
#Set the Key Action attributes
keyaction.ka_in.text = "New %s" % (rows[0].name)
name = "New %s" % (rows[0].name)
keyaction.desc_in.text = rows[0].description
desc = rows[0].description
keyaction.custom_in.active = rows[0].custom
custom = rows[0].custom
#Get the Input Parameters
ip_rows = session.query(InputParameter).join(KeyAction).filter(KeyAction.id == action).all()
ip_list = []
for ip in ip_rows:
ip_list.append(ip.name)
writer.SaveKeyAction(product, module, sysarea, name, desc, custom, ip_list)
#Add the base widget to the screen in the carousel
self.root.get_screen('keyactiongroup').ids.carousel_ka.add_widget(keyaction)
#Add Text Inputs to IP Grid
for ip in ip_rows:
ip_input = TextInput(hint_text='Input Parameter')
keyaction.ipgrid_in.add_widget(ip_input)
keyaction.iplist.append(ip_input)
#Set the IP attributes
i=0
for ip in ip_rows:
keyaction.name_list.append(ip.name)
keyaction.id_list.append(ip.id)
keyaction.iplist[i].text = ip.name
i+=1
elif numSelected == 1:
action = selected_ids[0]
#Create the Key Action Carousel Item
keyaction = KeyActionCarouselItem(app=self)
#Set the Module & System Area
sa_rows = session.query(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.sa_in.text = sa_rows[0].name
sysarea = sa_rows[0].name
mod_rows = session.query(Module).join(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.module_in.text = mod_rows[0].name
module = mod_rows[0].name
prod_rows = session.query(Product).filter(Product.name == self.root.get_screen('keyactiongroup').current_product)
product = prod_rows[0].name
rows = session.query(KeyAction).filter(KeyAction.id == action)
#Set the Key Action attributes
keyaction.ka_in.text = "New %s" % (rows[0].name)
name = "New %s" % (rows[0].name)
keyaction.desc_in.text = rows[0].description
desc = rows[0].description
keyaction.custom_in.active = rows[0].custom
custom = rows[0].custom
#Get the Input Parameters
ip_rows = session.query(InputParameter).join(KeyAction).filter(KeyAction.id == action).all()
ip_list = []
for ip in ip_rows:
ip_list.append(ip.name)
writer.SaveKeyAction(product, module, sysarea, name, desc, custom, ip_list)
#Add the base widget to the screen in the carousel
self.root.get_screen('keyactiongroup').ids.carousel_ka.add_widget(keyaction)
#Add Text Inputs to IP Grid
for ip in ip_rows:
ip_input = TextInput(hint_text='Input Parameter')
keyaction.ipgrid_in.add_widget(ip_input)
keyaction.iplist.append(ip_input)
#Set the IP attributes
i=0
for ip in ip_rows:
keyaction.name_list.append(ip.name)
keyaction.id_list.append(ip.id)
keyaction.iplist[i].text = ip.name
i+=1
def DeleteKeyActionPopup(self, *args):
Logger.debug('Delete Key Action Popup')
popup = Popup(title='Delete Key Action', content=DeletePopup(), size_hint=(0.5, 0.4))
self.root.get_screen('keyactiongroup').pop_up=popup
popup.open()
def DeleteKeyAction(self, *args):
Logger.debug('Delete Key Action')
numSelected = len(selected)
if numSelected > 1:
for action in selected_ids:
results = session.query(KeyAction).filter(KeyAction.id == action).all()
if len(results) > 1:
Logger.debug('Business Key Violation encountered in Key Action table')
elif len(results) == 1:
result = results[0]
session.delete(result)
session.commit()
elif numSelected == 1:
action = selected_ids[0]
results = session.query(KeyAction).filter(KeyAction.id == action).all()
if len(results) > 1:
Logger.debug('Business Key Violation encountered in Key Action table')
elif len(results) == 1:
result = results[0]
session.delete(result)
session.commit()
self.ApplyFilterKAG(args)
#----------------------------------------------------------
#-------------------Grid Methods---------------------------
    def SelectButton(self, *args):
        """Stub callback for grid button selection; currently only logs."""
        Logger.debug('Select Button')
    def AddWorkflow(self, *args):
        """Stub callback for adding a workflow from the grid; currently only logs."""
        Logger.debug('Add Workflow')
#----------------------------------------------------------
#-------------------Quick Key Action Methods---------------
def ClearQuickAction(self, *args):
Logger.debug('QKA: Clear Quick Action')
#Remove all selected id's from the master list
del carousel_ids[:]
self.root.get_screen('keyactiongroup').ids.carousel_ka.clear_widgets()
keyaction = KeyActionCarouselItem(app=self)
#Add the base widget to the screen in the carousel
self.root.get_screen('keyactiongroup').ids.carousel_ka.add_widget(keyaction)
    def ValidateQuickKeyAction(self, *args):
        """Stub: validation of the quick key action form is not implemented yet."""
        pass
def SaveQuickKeyAction(self, *args):
Logger.debug('QKA: Save Quick Key Action Frame')
i = 0
#Loop through the children of the carousel and save each one
if len(carousel_ids)>1:
Logger.debug('QKA: Selected IDs Length %s' % (len(carousel_ids)))
for child in self.root.get_screen('keyactiongroup').ids.carousel_ka.slides:
mod_text = child.module_in.text
sa_text = child.sa_in.text
ka_text = child.ka_in.text
desc_text = child.desc_in.text
if len(mod_text) < 2:
lbl = Label(text='Module Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(sa_text) < 2:
lbl = Label(text='System Area Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(ka_text) < 2:
lbl = Label(text='Key Action Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(desc_text) < 2:
lbl = Label(text='Description not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
for child in self.root.get_screen('keyactiongroup').ids.carousel_ka.slides:
orig_id_list = child.id_list
name_list = child.name_list
keyactions = writer.SaveKeyActionByID(child, carousel_ids[i])
writer.SaveInputParameters(child, name_list, carousel_ids[i], orig_id_list)
i += 1
#If there is only one child, save it
elif len(carousel_ids) == 1:
child = self.root.get_screen('keyactiongroup').ids.carousel_ka.slides[0]
mod_text = child.module_in.text
sa_text = child.sa_in.text
ka_text = child.ka_in.text
desc_text = child.desc_in.text
if len(mod_text) < 2:
lbl = Label(text='Module Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(sa_text) < 2:
lbl = Label(text='System Area Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(ka_text) < 2:
lbl = Label(text='Key Action Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
if len(desc_text) < 2:
lbl = Label(text='Description not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
return True
Logger.debug('QKA: Selected IDs Length 1')
orig_id_list = child.id_list
name_list = child.name_list
keyactions = writer.SaveKeyActionByID(child, carousel_ids[i])
writer.SaveInputParameters(child, name_list, carousel_ids[i], orig_id_list)
else:
#Save the key action as a new key action
Logger.debug('QKA: Selected IDs Length 0')
if len(self.root.get_screen('keyactiongroup').ids.carousel_ka.slides) != 0:
#Only execute if there are elements in the carousel
Logger.debug('QKA: Elements exist in the carousel')
child = self.root.get_screen('keyactiongroup').ids.carousel_ka.slides[0]
mod_text = child.module_in.text
sa_text = child.sa_in.text
ka_text = child.ka_in.text
desc_text = child.desc_in.text
if len(mod_text) < 2:
lbl = Label(text='Module Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
er_popup.open()
return True
if len(sa_text) < 2:
lbl = Label(text='System Area Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
er_popup.open()
return True
if len(ka_text) < 2:
lbl = Label(text='Key Action Name not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
er_popup.open()
return True
if len(desc_text) < 2:
lbl = Label(text='Description not long enough')
er_popup = Popup(title='Error', content=lbl, size_hint=(0.5, 0.3))
er_popup.open()
return True
#Module
prod_rows = session.query(Product).filter(Product.name == self.root.get_screen('keyactiongroup').current_product).all()
rows = session.query(Module).filter(Module.name == child.module_in.text).all()
if len(rows) > 1:
Logger.debug('Business Key Violation encountered in Module table')
elif len(rows) != 1:
mod = Module()
mod.name = child.module_in.text
mod.productid = prod_rows[0].id
session.add(mod)
session.commit()
Logger.debug('QKA: Module Committed %s' % (child.module_in.text))
#System Area
sa_rows = session.query(SystemArea).filter(SystemArea.name == child.sa_in.text).all()
if len(sa_rows) > 1:
Logger.debug('Business Key Violation encountered in System Area table')
elif len(sa_rows) == 1:
sa_rows[0].name == child.sa_in.text
if len(rows) == 1:
sa_rows[0].moduleid = rows[0].id
else:
sa_rows[0].moduleid = mod.id
else:
sys = SystemArea()
sys.name = child.sa_in.text
session.add(sys)
if len(rows) == 1:
sys.moduleid = rows[0].id
else:
sys.moduleid = mod.id
session.commit()
Logger.debug('QKA: System Area Committed %s' % (child.sa_in.text))
#Key Action
kaName = child.ka_in.text
keyaction = KeyAction(name=kaName)
session.add(keyaction)
if len(sa_rows) == 1:
keyaction.systemareaid = sa_rows[0].id
else:
keyaction.systemareaid = sys.id
keyaction.description = child.desc_in.text
keyaction.custom = child.custom_in.active
session.commit()
Logger.debug('QKA: Key Action Committed %s' % (child.ka_in.text))
#Input Parameters
for input in child.iplist:
if input.text != '' and input.text is not None:
inpparam = InputParameter(name=input.text)
session.add(inpparam)
inpparam.keyactionid = keyaction.id
session.commit()
self.ApplyFilterKAG(args)
del carousel_ids[:]
#del selected[:]
self.root.get_screen('keyactiongroup').ids.carousel_ka.clear_widgets()
def LoadQuickAction(self, *args):
Logger.debug('Load Quick Action')
self.root.get_screen('keyactiongroup').ids.carousel_ka.clear_widgets()
numSelected = len(selected)
del carousel_ids[:]
if numSelected > 1:
for action in selected_ids:
rows = session.query(KeyAction).\
filter(KeyAction.id==action).all()
if len(rows) > 1:
#More than one business key is found
Logger.debug('Business Key Violation encountered in Key Action table')
elif len(rows) == 1:
#Exactly one business key is found
#Add the key action to the list of id's in the carousel
carousel_ids.append(rows[0].id)
#Create the Key Action Carousel Item
keyaction = KeyActionCarouselItem(app=self)
#Set the Module & System Area
sa_rows = session.query(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.sa_in.text = sa_rows[0].name
mod_rows = session.query(Module).join(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.module_in.text = mod_rows[0].name
#Set the Key Action attributes
keyaction.ka_in.text = rows[0].name
keyaction.desc_in.text = rows[0].description
keyaction.custom_in.active = rows[0].custom
#Get the Input Parameters
ip_rows = session.query(InputParameter).join(KeyAction).filter(KeyAction.id == action).all()
#Add the base widget to the screen in the carousel
self.root.get_screen('keyactiongroup').ids.carousel_ka.add_widget(keyaction)
#Add Text Inputs to IP Grid
for ip in ip_rows:
ip_input = TextInput(hint_text='Input Parameter')
keyaction.ipgrid_in.add_widget(ip_input)
keyaction.iplist.append(ip_input)
#Set the IP attributes
i=0
for ip in ip_rows:
keyaction.name_list.append(ip.name)
keyaction.id_list.append(ip.id)
keyaction.iplist[i].text = ip.name
i+=1
else:
#No matching business keys are found
Logger.debug('Business Key Called from UI that does not exist in DB')
elif numSelected == 1:
action = selected_ids[0]
rows = session.query(KeyAction).filter(KeyAction.id==action).all()
if len(rows) > 1:
#More than one business key is found
Logger.debug('Business Key Violation encountered in Key Action table')
elif len(rows) == 1:
#Exactly one business key is found
keyaction = KeyActionCarouselItem(app=self)
#Add the key action to the list of id's in the carousel
carousel_ids.append(rows[0].id)
#Set the Module & System Area
sa_rows = session.query(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.sa_in.text = sa_rows[0].name
mod_rows = session.query(Module).join(SystemArea).join(KeyAction).filter(KeyAction.id == action)
keyaction.module_in.text = mod_rows[0].name
#Set the Key Action attributes
keyaction.ka_in.text = rows[0].name
keyaction.desc_in.text = rows[0].description
keyaction.custom_in.active = rows[0].custom
#Get the Input Parameters
ip_rows = session.query(InputParameter).join(KeyAction).filter(KeyAction.id == action).all()
#Add the base widget to the screen in the carousel
self.root.get_screen('keyactiongroup').ids.carousel_ka.add_widget(keyaction)
#Add Text Inputs to IP Grid
for ip in ip_rows:
ip_input = TextInput(hint_text='Input Parameter')
keyaction.ipgrid_in.add_widget(ip_input)
keyaction.iplist.append(ip_input)
#Set the IP attributes
i=0
for ip in ip_rows:
keyaction.name_list.append(ip.name)
keyaction.id_list.append(ip.id)
keyaction.iplist[i].text = ip.name
i+=1
else:
#No matching business keys are found
Logger.debug('Business Key Called from UI that does not exist in DB')
if __name__ == '__main__':
    #Script entry point: start the Kivy application event loop
    TestScriptBuilderApp().run()
| 48.617116
| 204
| 0.575148
|
4a063d6bb40b2b5942ef6f07d12c2f1601e69ea4
| 990
|
py
|
Python
|
create_pickled_national_bracket.py
|
dhermes/ncaa-bracket-scenarios
|
f314581068aceb4e60ad059061313b4afdcd9ca8
|
[
"Apache-2.0"
] | 1
|
2016-12-11T18:09:45.000Z
|
2016-12-11T18:09:45.000Z
|
create_pickled_national_bracket.py
|
dhermes/ncaa-bracket-scenarios
|
f314581068aceb4e60ad059061313b4afdcd9ca8
|
[
"Apache-2.0"
] | null | null | null |
create_pickled_national_bracket.py
|
dhermes/ncaa-bracket-scenarios
|
f314581068aceb4e60ad059061313b4afdcd9ca8
|
[
"Apache-2.0"
] | null | null | null |
import os
import pickle
from game_tree_classes import GameSlots
from game_tree_classes import Team
from game_tree_classes import WinnerOf
import utils
def main():
    """Build the full 127-slot bracket tree and pickle it.

    Slots 0-63 hold the 64 teams; every slot in a later round is a
    WinnerOf referencing the two slots that feed into it.
    """
    bracket = GameSlots()
    for idx in xrange(64):
        # NOTE: This relies on the assumption from get_team_mapping.py
        # that the team ID is 1 more than the slot ID.
        bracket.add_slot(idx, Team(idx + 1))
    prev_start, round_start = 0, 64
    for games_in_round in (32, 16, 8, 4, 2, 1):
        for offset in xrange(games_in_round):
            feeder = prev_start + 2 * offset
            bracket.add_slot(round_start + offset, WinnerOf(feeder, feeder + 1))
        prev_start, round_start = round_start, round_start + games_in_round
    bracket.save(utils.BASE_BRACKET_PICKLE)
if __name__ == '__main__':
    # Script entry point: build and pickle the base bracket.
    main()
| 30.9375
| 71
| 0.671717
|
4a063e4a6f283f63b8ebf786a89b3cb9ffb7de5d
| 23,529
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/resource/_params.py
|
dyna-dot/azure-cli
|
47d67e6e47a574a82b53c181084b29479aa92d51
|
[
"MIT"
] | 1
|
2019-10-01T10:29:15.000Z
|
2019-10-01T10:29:15.000Z
|
src/azure-cli/azure/cli/command_modules/resource/_params.py
|
dyna-dot/azure-cli
|
47d67e6e47a574a82b53c181084b29479aa92d51
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/resource/_params.py
|
dyna-dot/azure-cli
|
47d67e6e47a574a82b53c181084b29479aa92d51
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-locals, too-many-statements, line-too-long
def load_arguments(self, _):
    """Register argument metadata for the resource-management command groups.

    Wires names, short options, help strings, completers and validators for the
    ``resource``, ``provider``, ``feature``, ``policy``, ``group``/``deployment``,
    ``tag``, ``lock``, ``managedapp``, ``account`` and ``rest`` commands.

    Called by the azure-cli command loader; ``self`` is the loader instance and
    the second positional argument (the command string) is unused.
    """
    from argcomplete.completers import FilesCompleter

    from azure.mgmt.resource.resources.models import DeploymentMode
    from azure.mgmt.resource.locks.models import LockLevel
    from azure.mgmt.resource.managedapplications.models import ApplicationLockLevel

    from azure.cli.core.api import get_subscription_id_list
    from azure.cli.core.commands.parameters import (
        resource_group_name_type, get_location_type, tag_type, tags_type, get_resource_group_completion_list, no_wait_type, file_type,
        get_enum_type, get_three_state_flag)
    from azure.cli.core.profiles import ResourceType
    from knack.arguments import ignore_type, CLIArgumentType

    from azure.cli.command_modules.resource._completers import (
        get_policy_completion_list, get_policy_set_completion_list, get_policy_assignment_completion_list,
        get_resource_types_completion_list, get_providers_completion_list)
    from azure.cli.command_modules.resource._validators import (
        validate_lock_parameters, validate_resource_lock, validate_group_lock, validate_subscription_lock, validate_metadata, RollbackAction,
        validate_msi)

    # BASIC PARAMETER CONFIGURATION
    # Reusable CLIArgumentType definitions shared by several commands below.
    resource_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The resource name. (Ex: myC)')
    resource_type_type = CLIArgumentType(help="The resource type (Ex: 'resC'). Can also accept namespace/type format (Ex: 'Microsoft.Provider/resC')")
    resource_namespace_type = CLIArgumentType(options_list='--namespace', completer=get_providers_completion_list, help="Provider namespace (Ex: 'Microsoft.Provider')")
    resource_parent_type = CLIArgumentType(required=False, options_list=['--parent'], help="The parent path (Ex: 'resA/myA/resB/myB')")
    existing_policy_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_completion_list, help='The policy definition name.')
    existing_policy_set_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_set_completion_list, help='The policy set definition name.')
    # FIX: this previously passed `FilesCompleter=get_subscription_id_list`, an
    # unknown keyword that silently dropped the completion function; the
    # subscription completer must be wired through the `completer=` setting.
    subscription_type = CLIArgumentType(options_list='--subscription', completer=get_subscription_id_list, help='The subscription id of the policy [set] definition.')
    management_group_name_type = CLIArgumentType(options_list='--management-group', help='The name of the management group of the policy [set] definition.')
    identity_scope_type = CLIArgumentType(help="Scope that the system assigned identity can access")
    identity_role_type = CLIArgumentType(options_list=['--role'], help="Role name or id that will be assigned to the managed identity")

    _PROVIDER_HELP_TEXT = 'the resource namespace, aka \'provider\''

    # `az resource` commands
    with self.argument_context('resource') as c:
        c.argument('no_wait', no_wait_type)
        c.argument('resource_group_name', resource_group_name_type, arg_group='Resource Id')
        c.ignore('resource_id')
        c.argument('resource_name', resource_name_type, arg_group='Resource Id')
        c.argument('api_version', help='The api version of the resource (omit for latest)', required=False, arg_group='Resource Id')
        c.argument('resource_provider_namespace', resource_namespace_type, arg_group='Resource Id')
        c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list, arg_group='Resource Id')
        c.argument('parent_resource_path', resource_parent_type, arg_group='Resource Id')
        c.argument('tag', tag_type)
        c.argument('tags', tags_type)
        c.argument('resource_ids', nargs='+', options_list=['--ids'], help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.', arg_group='Resource Id')
        c.argument('include_response_body', arg_type=get_three_state_flag(), help='Use if the default command output doesn\'t capture all of the property data.')

    with self.argument_context('resource list') as c:
        c.argument('name', resource_name_type)

    with self.argument_context('resource move') as c:
        c.argument('ids', nargs='+')

    with self.argument_context('resource invoke-action') as c:
        c.argument('action', help='The action that will be invoked on the specified resource')
        c.argument('request_body', help='JSON encoded parameter arguments for the action that will be passed along in the post request body. Use @{file} to load from a file.')

    with self.argument_context('resource create') as c:
        c.argument('resource_id', options_list=['--id'], help='Resource ID.', action=None)
        c.argument('properties', options_list=['--properties', '-p'], help='a JSON-formatted string containing resource properties')
        c.argument('is_full_object', action='store_true', help='Indicates that the properties object includes other options such as location, tags, sku, and/or plan.')

    with self.argument_context('resource link') as c:
        c.argument('target_id', options_list=['--target', c.deprecate(target='--target-id', redirect='--target', hide=True)], help='Fully-qualified resource ID of the resource link target.')
        c.argument('link_id', options_list=['--link', c.deprecate(target='--link-id', redirect='--link', hide=True)], help='Fully-qualified resource ID of the resource link.')
        c.argument('notes', help='Notes for the link.')
        c.argument('scope', help='Fully-qualified scope for retrieving links.')
        c.argument('filter_string', options_list=['--filter', c.deprecate(target='--filter-string', redirect='--filter', hide=True)], help='Filter string for limiting results.')

    # `az provider` commands
    with self.argument_context('provider') as c:
        c.ignore('top')
        c.argument('resource_provider_namespace', options_list=['--namespace', '-n'], completer=get_providers_completion_list, help=_PROVIDER_HELP_TEXT)

    with self.argument_context('provider register') as c:
        c.argument('wait', action='store_true', help='wait for the registration to finish')

    with self.argument_context('provider unregister') as c:
        c.argument('wait', action='store_true', help='wait for unregistration to finish')

    with self.argument_context('provider operation') as c:
        c.argument('api_version', help="The api version of the 'Microsoft.Authorization/providerOperations' resource (omit for latest)")

    # `az feature` commands
    with self.argument_context('feature') as c:
        c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT)
        c.argument('feature_name', options_list=['--name', '-n'], help='the feature name')

    with self.argument_context('feature list') as c:
        c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT)

    # `az policy` commands (definitions, assignments, set-definitions)
    with self.argument_context('policy') as c:
        c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group where the policy will be applied')

    with self.argument_context('policy definition', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
        c.argument('policy_definition_name', arg_type=existing_policy_definition_name_type)
        c.argument('rules', help='JSON formatted string or a path to a file with such content', type=file_type, completer=FilesCompleter())
        c.argument('display_name', help='Display name of policy definition.')
        c.argument('description', help='Description of policy definition.')
        c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01')
        c.argument('metadata', min_api='2017-06-01-preview', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.')
        c.argument('management_group', arg_type=management_group_name_type)
        c.argument('mode', options_list=['--mode', '-m'], help='Mode of the policy definition, e.g. All, Indexed. Please visit https://aka.ms/azure-policy-mode for more information.', min_api='2016-12-01')
        c.argument('subscription', arg_type=subscription_type)
        c.ignore('_subscription')  # disable global subscription

    with self.argument_context('policy definition create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
        c.argument('name', options_list=['--name', '-n'], help='Name of the new policy definition.')

    with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
        c.ignore('_subscription')
        c.argument('name', options_list=['--name', '-n'], completer=get_policy_assignment_completion_list, help='Name of the policy assignment.')
        c.argument('scope', help='Scope to which this policy assignment applies.')
        c.argument('disable_scope_strict_match', action='store_true', help='Include policy assignments either inherited from parent scope or at child scope.')
        c.argument('display_name', help='Display name of the policy assignment.')
        c.argument('policy', help='Name or id of the policy definition.', completer=get_policy_completion_list)

    with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
        c.argument('name', options_list=['--name', '-n'], help='Name of the new policy assignment.')
        c.argument('params', options_list=['--params', '-p'], help='JSON formatted string or a path to a file or uri with parameter values of the policy rule.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01')

    # newer API versions progressively add sku / location / identity / enforcement mode
    with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2017-06-01-preview') as c:
        c.argument('policy_set_definition', options_list=['--policy-set-definition', '-d'], help='Name or id of the policy set definition.')
        c.argument('sku', options_list=['--sku', '-s'], help='policy sku.', arg_type=get_enum_type(['free', 'standard']))
        c.argument('notscopes', options_list='--not-scopes', nargs='+')

    with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx))

    with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, arg_group='Managed Identity', min_api='2018-05-01') as c:
        c.argument('assign_identity', nargs='*', validator=validate_msi, help="Assigns a system assigned identity to the policy assignment.")
        c.argument('identity_scope', arg_type=identity_scope_type)
        c.argument('identity_role', arg_type=identity_role_type)

    with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2019-06-01') as c:
        c.argument('enforcement_mode', options_list=['--enforcement-mode', '-e'], help='Enforcement mode of the policy assignment, e.g. Default, DoNotEnforce. Please visit https://aka.ms/azure-policyAssignment-enforcement-mode for more information.', arg_type=get_enum_type(['Default', 'DoNotEnforce']))

    with self.argument_context('policy assignment identity', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c:
        c.argument('identity_scope', arg_type=identity_scope_type)
        c.argument('identity_role', arg_type=identity_role_type)

    with self.argument_context('policy set-definition', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
        c.argument('policy_set_definition_name', arg_type=existing_policy_set_definition_name_type)
        c.argument('display_name', help='Display name of policy set definition.')
        c.argument('description', help='Description of policy set definition.')
        c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter())
        c.argument('definitions', help='JSON formatted string or a path to a file or uri containing definitions.', type=file_type, completer=FilesCompleter())
        c.argument('management_group', arg_type=management_group_name_type)
        c.argument('subscription', arg_type=subscription_type)
        c.ignore('_subscription')  # disable global subscription

    with self.argument_context('policy set-definition create', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
        c.argument('name', options_list=['--name', '-n'], help='Name of the new policy set definition.')

    # `az group` and deployment commands
    with self.argument_context('group') as c:
        c.argument('tag', tag_type)
        c.argument('tags', tags_type)
        c.argument('resource_group_name', resource_group_name_type, options_list=['--name', '-n', '--resource-group', '-g'])

    with self.argument_context('group deployment') as c:
        c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list)
        c.argument('deployment_name', options_list=['--name', '-n'], required=True, help='The deployment name.')
        c.argument('template_file', completer=FilesCompleter(), type=file_type, help="a template file path in the file system")
        c.argument('template_uri', help='a uri to a remote template file')
        c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'), help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)')
        c.argument('parameters', action='append', nargs='+', completer=FilesCompleter())
        c.argument('rollback_on_error', nargs='?', action=RollbackAction, help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.')

    with self.argument_context('group deployment create') as c:
        c.argument('deployment_name', options_list=['--name', '-n'], required=False,
                   help='The deployment name. Default to template file base name')
        c.argument('handle_extended_json_format', action='store_true', is_preview=True,
                   help='Support to handle extended template content including multiline and comments in deployment')

    with self.argument_context('group deployment operation show') as c:
        c.argument('operation_ids', nargs='+', help='A list of operation ids to show')

    with self.argument_context('deployment') as c:
        c.argument('deployment_name', options_list=['--name', '-n'], required=True, help='The deployment name.')
        c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
        c.argument('template_file', completer=FilesCompleter(), type=file_type, help="a template file path in the file system")
        c.argument('template_uri', help='a uri to a remote template file')
        c.argument('parameters', action='append', nargs='+', completer=FilesCompleter())

    with self.argument_context('deployment create') as c:
        c.argument('deployment_name', options_list=['--name', '-n'], required=False,
                   help='The deployment name. Default to template file base name')

    with self.argument_context('deployment operation show') as c:
        c.argument('operation_ids', nargs='+', help='A list of operation ids to show')

    with self.argument_context('group export') as c:
        c.argument('include_comments', action='store_true')
        c.argument('include_parameter_default_value', action='store_true')

    with self.argument_context('group create') as c:
        c.argument('rg_name', options_list=['--name', '--resource-group', '-n', '-g'], help='name of the new resource group', completer=None)

    # `az tag` commands
    with self.argument_context('tag') as c:
        c.argument('tag_name', options_list=['--name', '-n'])
        c.argument('tag_value', options_list='--value')

    # `az lock` commands and the per-scope lock variants
    with self.argument_context('lock') as c:
        c.argument('lock_name', options_list=['--name', '-n'], validator=validate_lock_parameters)
        c.argument('level', arg_type=get_enum_type(LockLevel), options_list=['--lock-type', '-t'], help='The type of lock restriction.')
        c.argument('parent_resource_path', resource_parent_type)
        c.argument('resource_provider_namespace', resource_namespace_type)
        c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list)
        c.argument('resource_name', options_list=['--resource', '--resource-name'], help='Name or ID of the resource being locked. If an ID is given, other resource arguments should not be given.')
        c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.')
        c.argument('resource_group', resource_group_name_type, validator=validate_lock_parameters)

    with self.argument_context('resource lock') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('resource_name', options_list=['--resource', '--resource-name'], help='If an ID is given, other resource arguments should not be given.', validator=validate_resource_lock)

    with self.argument_context('group lock') as c:
        c.argument('resource_group', resource_group_name_type, validator=validate_group_lock, id_part=None)

    with self.argument_context('group lock create') as c:
        c.argument('resource_group', required=True)

    with self.argument_context('account lock') as c:
        c.argument('resource_group', ignore_type, validator=validate_subscription_lock)

    # account/group locks do not apply to an individual resource
    for scope in ['account', 'group']:
        with self.argument_context('{} lock'.format(scope)) as c:
            c.ignore('resource_provider_namespace', 'parent_resource_path', 'resource_type', 'resource_name')

    for scope in ['lock', 'account lock', 'group lock', 'resource lock']:
        with self.argument_context(scope) as c:
            c.argument('lock_name', options_list=['--name', '-n'], help='Name of the lock')
            c.argument('level', options_list=['--lock-type', '-t'], arg_type=get_enum_type([LockLevel.can_not_delete, LockLevel.read_only]), help='The type of lock restriction.')
            c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.')
            c.argument('notes', help='Notes about this lock.')

    # `az managedapp` commands
    with self.argument_context('managedapp') as c:
        c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application', id_part='resource_group')
        c.argument('application_name', options_list=['--name', '-n'], id_part='name')

    with self.argument_context('managedapp definition') as c:
        c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application definition', id_part='resource_group')
        c.argument('application_definition_name', options_list=['--name', '-n'], id_part='name')

    with self.argument_context('managedapp create') as c:
        c.argument('name', options_list=['--name', '-n'], help='name of the new managed application', completer=None)
        c.argument('location', help='the managed application location')
        c.argument('managedapp_definition_id', options_list=['--managedapp-definition-id', '-d'], help='the full qualified managed application definition id')
        c.argument('managedby_resource_group_id', options_list=['--managed-rg-id', '-m'], help='the resource group managed by the managed application')
        c.argument('parameters', help='JSON formatted string or a path to a file with such content', type=file_type)

    with self.argument_context('managedapp definition create') as c:
        c.argument('lock_level', arg_type=get_enum_type(ApplicationLockLevel), help='The type of lock restriction.')
        c.argument('authorizations', options_list=['--authorizations', '-a'], nargs='+', help="space-separated authorization pairs in a format of <principalId>:<roleDefinitionId>")
        c.argument('createUiDefinition', options_list=['--create-ui-definition', '-c'], help='JSON formatted string or a path to a file with such content', type=file_type)
        c.argument('mainTemplate', options_list=['--main-template', '-t'], help='JSON formatted string or a path to a file with such content', type=file_type)

    # `az account` commands
    with self.argument_context('account') as c:
        c.argument('subscription', options_list=['--subscription', '-s'], help='Name or ID of subscription.', completer=get_subscription_id_list)
        c.ignore('_subscription')  # hide global subscription parameter

    with self.argument_context('account management-group') as c:
        c.argument('group_name', options_list=['--name', '-n'])

    with self.argument_context('account management-group show') as c:
        c.argument('expand', options_list=['--expand', '-e'], action='store_true')
        c.argument('recurse', options_list=['--recurse', '-r'], action='store_true')

    with self.argument_context('account management-group create') as c:
        c.argument('display_name', options_list=['--display-name', '-d'])
        c.argument('parent', options_list=['--parent', '-p'])

    with self.argument_context('account management-group update') as c:
        c.argument('display_name', options_list=['--display-name', '-d'])
        c.argument('parent_id', options_list=['--parent', '-p'])

    # `az rest` raw-request command
    with self.argument_context('rest') as c:
        c.argument('method', options_list=['--method', '-m'], arg_type=get_enum_type(['head', 'get', 'put', 'post', 'delete', 'options', 'patch'], default='get'),
                   help='HTTP request method')
        c.argument('uri', options_list=['--uri', '-u'], help='request uri. For uri without host, CLI will assume "https://management.azure.com/".'
                   ' Common tokens will also be replaced with real values including "{subscriptionId}"')
        c.argument('headers', nargs='+', help="Space-separated headers in KEY=VALUE format or JSON string. Use @{file} to load from a file")
        c.argument('uri_parameters', nargs='+', help='Space-separated queries in KEY=VALUE format or JSON string. Use @{file} to load from a file')
        c.argument('skip_authorization_header', action='store_true', help='do not auto append "Authorization" header')
        c.argument('body', options_list=['--body', '-b'], help='request body. Use @{file} to load from a file')
        c.argument('output_file', help='save response payload to a file')
        c.argument('resource', help='Resource url for which CLI should acquire a token in order to access '
                   'the service. The token will be placed in the "Authorization" header. By default, '
                   'CLI can figure this out based on "--url" argument, unless you use ones not in the list '
                   'of "az cloud show --query endpoints"')
| 78.43
| 303
| 0.713417
|
4a063e89eee4f827e6f4ac72ffac29117326e281
| 2,106
|
py
|
Python
|
dusty/payload.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 421
|
2015-06-02T16:29:59.000Z
|
2021-06-03T18:44:42.000Z
|
dusty/payload.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 404
|
2015-06-02T20:23:42.000Z
|
2019-08-21T16:59:41.000Z
|
dusty/payload.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 16
|
2015-06-16T17:21:02.000Z
|
2020-03-27T02:27:09.000Z
|
import json
import yaml
from .constants import VERSION
class Payload(object):
    """A deferred function call that can be shipped to the dusty daemon.

    Stores the callable plus its positional/keyword arguments; `serialize`
    turns a registered call into a JSON document and `deserialize` parses it
    back on the daemon side.
    """

    def __init__(self, fn, *args, **kwargs):
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        # Default flags; callers may flip these before dispatching.
        self.run_on_daemon = True
        self.client_version = VERSION
        self.suppress_warnings = False

    def __eq__(self, other):
        # Payloads match when they wrap the same call with the same arguments.
        if not isinstance(other, self.__class__):
            return False
        return (self.fn == other.fn
                and self.args == other.args
                and self.kwargs == other.kwargs)

    def run(self):
        """Invoke the wrapped function with the stored arguments."""
        self.fn(*self.args, **self.kwargs)

    def serialize(self):
        """Return a JSON document describing this call; the function must be
        registered via the @daemon_command decorator."""
        fn_key = function_key(self.fn)
        if fn_key not in _daemon_command_mapping:
            raise RuntimeError('Function key {} not found; you may need to decorate your function'.format(fn_key))
        return json.dumps({
            'fn_key': fn_key,
            'client_version': self.client_version,
            'suppress_warnings': self.suppress_warnings,
            'args': self.args,
            'kwargs': self.kwargs,
        })

    @staticmethod
    def deserialize(doc):
        """Parse a serialized payload document (YAML is a superset of JSON)."""
        return yaml.safe_load(doc)
#: Registry mapping '<module>.<name>' keys to daemon-callable functions.
_daemon_command_mapping = {}


def function_key(fn):
    """Return the fully-qualified registry key ('<module>.<name>') for *fn*."""
    return '%s.%s' % (fn.__module__, fn.__name__)
def daemon_command(fn):
    """Decorator registering *fn* so payloads naming it can run on the daemon.

    Returns *fn* unchanged; raises if a different function already claimed
    the same '<module>.<name>' key.
    """
    key = function_key(fn)
    existing = _daemon_command_mapping.get(key)
    if existing is not None and existing != fn:
        raise RuntimeError("Function mapping key collision: {}. Name one of the functions something else".format(key))
    _daemon_command_mapping[key] = fn
    return fn
def get_payload_function(fn_key):
    """Look up the @daemon_command-registered function for *fn_key*.

    Raises RuntimeError when no function was registered under that key.
    """
    if fn_key in _daemon_command_mapping:
        return _daemon_command_mapping[fn_key]
    raise RuntimeError('Function key {} not found'.format(fn_key))
def init_yaml_constructor():
    """Globally patch yaml.SafeLoader so every YAML string scalar is UTF-8 encoded.

    Registers a custom constructor for the core YAML string tag; afterwards
    yaml.safe_load returns UTF-8 encoded strings instead of unicode strings
    for all string values (written for Python 2, where non-ascii chars would
    otherwise come back as unicode objects).

    NOTE(review): under Python 3 this makes safe_load return ``bytes`` for
    every string — confirm callers expect that before using on py3.
    """
    # Constructor invoked for every 'tag:yaml.org,2002:str' node.
    def utf_encoding_string_constructor(loader, node):
        return loader.construct_scalar(node).encode('utf-8')
    # Mutates the shared SafeLoader class: affects all subsequent safe_load calls.
    yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:str', utf_encoding_string_constructor)
| 35.1
| 118
| 0.687559
|
4a063ecf8a206bcfe3145a95f5dcff5496d0eede
| 6,197
|
py
|
Python
|
indico/modules/events/models/reviews.py
|
aiforrural/Digital-Events-Example
|
628aaa8727b259b9367ac0ae1c5ba8e9e95eca82
|
[
"MIT"
] | 1
|
2021-02-08T09:34:27.000Z
|
2021-02-08T09:34:27.000Z
|
indico/modules/events/models/reviews.py
|
pamirk/indico
|
c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768
|
[
"MIT"
] | null | null | null |
indico/modules/events/models/reviews.py
|
pamirk/indico
|
c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from sqlalchemy.ext.hybrid import hybrid_property
from indico.util.caching import memoize_request
from indico.util.locators import locator_property
from indico.web.flask.util import url_for
class ProposalGroupProxy:
    """Proxy around the object that proposals are grouped by.

    Presents a uniform interface (title, full title, locator for URL
    building) regardless of the concrete model class used for grouping.
    """

    title_attr = 'title'
    full_title_attr = 'full_title'

    def __init__(self, group):
        self.instance = group

    def __eq__(self, other):
        # Equal to another proxy wrapping the same object, or to the
        # wrapped object itself.
        if isinstance(other, ProposalGroupProxy):
            return self.instance == other.instance
        if isinstance(other, type(self.instance)):
            return self.instance == other
        return False

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.instance)

    @property
    def title(self):
        return getattr(self.instance, self.title_attr)

    @property
    def full_title(self):
        # Fall back to the plain title when the wrapped object has no
        # dedicated full-title attribute.
        if hasattr(self.instance, self.full_title_attr):
            return getattr(self.instance, self.full_title_attr)
        return self.title

    @locator_property
    def locator(self):
        return self.instance.locator

    def __repr__(self):
        return f'<ProposalGroupProxy: {self.instance}>'
class ProposalRevisionMixin:
    """Properties and methods shared by proposal revisions."""

    #: Attribute of the revision used to fetch the proposal object.
    proposal_attr = None
    #: Whether the reviewing process supports multiple revisions per proposal;
    #: when False the proposal object itself acts as its single revision.
    revisions_enabled = True

    @property
    def proposal(self):
        """Return the proposal this revision belongs to."""
        if not self.revisions_enabled:
            # single-revision mode: the revision *is* the proposal
            return self
        return getattr(self, self.proposal_attr)

    def get_timeline(self, user=None):
        raise NotImplementedError

    def get_reviews(self, group=None, user=None):
        """Return this revision's reviews, optionally filtered by group/user."""
        result = list(self.reviews)
        if group:
            result = [rev for rev in result if rev.group == group]
        if user:
            result = [rev for rev in result if rev.user == user]
        return result

    def get_reviewed_for_groups(self, user, include_reviewed=False):
        raise NotImplementedError

    @memoize_request
    def get_reviewer_render_data(self, user):
        """Collect the data needed to render *user*'s reviewing state."""
        groups = self.get_reviewed_for_groups(user, include_reviewed=True)
        reviews = {rev.group: rev for rev in self.get_reviews(user=user)}
        reviewed_groups = {rev.group for rev in reviews.values()}
        return {'groups': groups,
                'missing_groups': groups - reviewed_groups,
                'reviewed_groups': reviewed_groups,
                'reviews': reviews}
class ProposalMixin:
    """Mixin for classes representing a reviewable proposal (e.g. Abstract, Paper)."""

    #: Unique identifier used to handle rendering differences between
    #: proposal types.
    proposal_type = None
    #: Attribute on the event holding the object with the reviewing settings.
    call_for_proposals_attr = None
    #: Whether multiple revisions per proposal are supported.
    revisions_enabled = True

    # endpoint names used to build the reviewing-related URLs below
    delete_comment_endpoint = None
    create_comment_endpoint = None
    edit_comment_endpoint = None
    create_review_endpoint = None
    edit_review_endpoint = None
    create_judgment_endpoint = None

    @property
    def cfp(self):
        """Return the call-for-proposals object holding the reviewing settings."""
        return getattr(self.event, self.call_for_proposals_attr)

    @property
    def is_in_final_state(self):
        raise NotImplementedError

    def get_revisions(self):
        if not self.revisions_enabled:
            # single-revision mode: the proposal is its only revision
            return [self]
        raise NotImplementedError

    def get_last_revision(self):
        if not self.revisions_enabled:
            return self
        raise NotImplementedError

    def can_comment(self, user):
        raise NotImplementedError

    def can_review(self, user, check_state=False):
        raise NotImplementedError

    def get_delete_comment_url(self, comment):
        return url_for(self.delete_comment_endpoint, comment)

    def get_save_comment_url(self, comment=None):
        if comment:
            return url_for(self.edit_comment_endpoint, comment)
        return url_for(self.create_comment_endpoint, self)

    def get_save_review_url(self, group=None, review=None):
        if review:
            return url_for(self.edit_review_endpoint, review)
        return url_for(self.create_review_endpoint, self, group)

    def get_save_judgment_url(self):
        return url_for(self.create_judgment_endpoint, self)
class ProposalCommentMixin:
    """Mixin for proposal comments shown in the reviewing timeline."""

    #: Identifier used when rendering this item in the timeline.
    timeline_item_type = 'comment'

    def can_edit(self, user):
        """Check whether *user* may edit this comment (must be overridden)."""
        raise NotImplementedError
class ProposalReviewMixin:
    """Mixin for proposal reviews.

    Classes that represent a review of a proposal should extend this class
    (ex: AbstractReview, PaperReview).
    """
    #: A unique identifier to handle rendering differences between timeline
    #: items.
    timeline_item_type = 'review'
    #: Name of the attribute holding the revision object the review refers to
    #: (set by the subclass).
    revision_attr = None
    #: Name of the attribute holding the object used to group reviews together
    #: (set by the subclass).
    group_attr = None
    #: Proxy class providing a uniform interface to the review grouping object.
    group_proxy_cls = ProposalGroupProxy
    # hybrid_property: usable both on instances and in SQLAlchemy queries.
    @hybrid_property
    def revision(self):
        return getattr(self, self.revision_attr)
    @property
    def group(self):
        # Wrap the raw grouping object in the proxy for uniform access.
        return self.group_proxy_cls(getattr(self, self.group_attr))
    @property
    def score(self):
        # No score by default; subclasses with numeric ratings override this.
        return None
    def can_edit(self, user):
        # Must be overridden by the concrete review class.
        raise NotImplementedError
| 30.082524
| 84
| 0.679845
|
4a06417f2c7b60b9dc5c886887a30aefb8b754ec
| 169
|
py
|
Python
|
quantipy/core/tools/dp/confirmit/helpers.py
|
acaide/quantipy3
|
7a466b3d0d4da7e95a2ce7e34e6e55f7a1f85a51
|
[
"MIT"
] | null | null | null |
quantipy/core/tools/dp/confirmit/helpers.py
|
acaide/quantipy3
|
7a466b3d0d4da7e95a2ce7e34e6e55f7a1f85a51
|
[
"MIT"
] | null | null | null |
quantipy/core/tools/dp/confirmit/helpers.py
|
acaide/quantipy3
|
7a466b3d0d4da7e95a2ce7e34e6e55f7a1f85a51
|
[
"MIT"
] | null | null | null |
def int_or_float(variable):
    """Return the numeric type name for a Confirmit variable definition.

    :param variable: dict-like variable metadata; a truthy ``'scale'`` entry
        marks the variable as decimal-scaled.
    :returns: ``'float'`` when the variable has a non-zero scale, else ``'int'``
        (including when ``'scale'`` is missing, ``None`` or ``0``).
    """
    # Truthiness already excludes 0/None/missing, so no explicit `!= 0` check
    # is needed.
    if variable.get('scale'):
        return 'float'
    return 'int'
| 24.142857
| 44
| 0.639053
|
4a06420dd43712ae235d1ef9d6fbd68e88342bd8
| 1,342
|
py
|
Python
|
pointcnn_cls/scannet_x2_l4.py
|
vistart/PointCNN
|
bc27b1239fa70830600807d439a3e4e7b689c0aa
|
[
"MIT"
] | 1
|
2018-12-09T17:16:02.000Z
|
2018-12-09T17:16:02.000Z
|
pointcnn_cls/scannet_x2_l4.py
|
vistart/PointCNN
|
bc27b1239fa70830600807d439a3e4e7b689c0aa
|
[
"MIT"
] | null | null | null |
pointcnn_cls/scannet_x2_l4.py
|
vistart/PointCNN
|
bc27b1239fa70830600807d439a3e4e7b689c0aa
|
[
"MIT"
] | 1
|
2020-04-03T13:49:06.000Z
|
2020-04-03T13:49:06.000Z
|
#!/usr/bin/python3
# PointCNN classification configuration (ScanNet dataset).
import os
import sys
import math
# Make the repository root importable so data_utils resolves.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import data_utils
# Dataset loading / preprocessing hooks.
load_fn = data_utils.load_cls_train_val
balance_fn = None
map_fn = None
keep_remainder = True
save_ply_fn = None
# Number of target classes.
num_class = 17
# Points sampled per cloud and training schedule.
sample_num = 1024
batch_size = 128
num_epochs = 1024
step_val = 500
learning_rate_base = 0.01
decay_steps = 8000
decay_rate = 0.5
learning_rate_min = 1e-6
weight_decay = 1e-6
# Augmentation: jitter magnitudes, rotation/scaling ranges; the trailing
# 'u'/'g' flags select the distribution ('u'niform / 'g'aussian — TODO confirm
# against data_utils).
jitter = 0.0
jitter_val = 0.0
rotation_range = [0, math.pi, 0, 'u']
rotation_range_val = [0, 0, 0, 'u']
order = 'rxyz'
scaling_range = [0.1, 0.1, 0.1, 'g']
scaling_range_val = [0, 0, 0, 'u']
# NOTE(review): 1 // 8 and 1 // 4 evaluate to 0, effectively disabling
# sample-count variation; other configs use sample_num // 8 — confirm intent.
sample_num_variance = 1 // 8
sample_num_clip = 1 // 4
# Channel-width multiplier and the 4 X-conv layers: K neighbors, dilation D,
# representative point count P (-1 = keep all), output channels C, dense links.
x = 3
xconv_param_name = ('K', 'D', 'P', 'C', 'links')
xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in
                [(8, 1, -1, 16 * x, []),
                 (12, 2, 384, 32 * x, []),
                 (16, 2, 128, 64 * x, []),
                 (16, 3, 128, 128 * x, [])]]
with_global = True
# Fully-connected head: (channels, dropout rate) per layer.
fc_param_name = ('C', 'dropout_rate')
fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in
             [(128 * x, 0.0),
              (64 * x, 0.8)]]
sampling = 'random'
optimizer = 'adam'
epsilon = 1e-2
# Input feature dimensionality; extra (non-xyz) features are ignored while
# use_extra_features is False.
data_dim = 6
use_extra_features = False
with_X_transformation = True
sorting_method = None
| 18.901408
| 76
| 0.628912
|
4a064256f14cd7da6d635daad45d57eb33a3418a
| 1,210
|
py
|
Python
|
bindings/python/examples/domain_point.py
|
mmccarty/legion
|
30e00fa6016527c4cf60025a461fb7865f8def6b
|
[
"Apache-2.0"
] | 555
|
2015-01-19T07:50:27.000Z
|
2022-03-22T11:35:48.000Z
|
bindings/python/examples/domain_point.py
|
mmccarty/legion
|
30e00fa6016527c4cf60025a461fb7865f8def6b
|
[
"Apache-2.0"
] | 1,157
|
2015-01-07T18:34:23.000Z
|
2022-03-31T19:45:27.000Z
|
bindings/python/examples/domain_point.py
|
mmccarty/legion
|
30e00fa6016527c4cf60025a461fb7865f8def6b
|
[
"Apache-2.0"
] | 145
|
2015-02-03T02:31:42.000Z
|
2022-02-28T12:03:51.000Z
|
#!/usr/bin/env python3
# Copyright 2021 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task, DomainPoint, RW
import numpy as np
@task
def main():
    """Exercise DomainPoint construction, printing, data access and equality."""
    # Build DomainPoints of rank 1, 2 and 3.
    coords = ([1], [1, 2], [1, 2, 3])
    points = [DomainPoint(c) for c in coords]

    # Show the string forms and raw point data for each rank.
    for pt in points:
        print(pt, repr(pt), pt.point)

    # The stored point data must round-trip the constructor argument.
    for pt, c in zip(points, coords):
        assert np.array_equal(pt.point, c)

    # Equality compares coordinates; differing values or ranks are unequal.
    assert points[0] == DomainPoint([1])
    assert points[0] != DomainPoint([0])
    assert points[0] != DomainPoint([1, 2])
# Entry point: run the Legion top-level task when executed as a script.
if __name__ == '__main__':
    main()
| 27.5
| 74
| 0.694215
|
4a06429d1f73a3cb154a2f3d118a3f5961e9cb1e
| 10,308
|
py
|
Python
|
bconv.py
|
emattei/methylCtools
|
cbbc26d6127f64daeb522ec817232148222b3fa6
|
[
"MIT"
] | 1
|
2018-07-17T16:59:46.000Z
|
2018-07-17T16:59:46.000Z
|
bconv.py
|
emattei/methylCtools
|
cbbc26d6127f64daeb522ec817232148222b3fa6
|
[
"MIT"
] | null | null | null |
bconv.py
|
emattei/methylCtools
|
cbbc26d6127f64daeb522ec817232148222b3fa6
|
[
"MIT"
] | 3
|
2019-02-08T10:55:07.000Z
|
2020-09-09T22:23:22.000Z
|
#!/usr/bin/env python
#######################################
# methylCtools bconv
# v1.0.0
# 10 june 2018
#
# volker hovestadt
# developed at the german cancer research center, 2011-2015
# methylctools@hovestadt.bio
#
#
# reads are converted back to original state. positions are stored in read id.
# if a read (pair) maps illegally (reverse to Watson strand/forward to Crick
# strand, assuming strand specific protocol) it is set QCfail (flag +512).
# if there are more than INT non-CpG unconverted cytosines within the aligned part
# of a read, the read/pair is set QCfail.
#
# input must be in BAM format (unsorted (!) bwa output, use "-" for stdin).
# output is in BAM format, use "-" for stdout.
def mod_bconv(sysargv):
    """Re-convert bisulfite reads to their original state after bwa alignment.

    The earlier pipeline step stores the positions of in-silico C->T (G->A)
    conversions in the read id.  This routine restores the original bases,
    merges the Watson ("_W") and Crick ("_C") contigs in the BAM header,
    flags illegally mapped or insufficiently converted reads as QC-fail
    (flag +512), and optionally writes a metrics file.

    sysargv: list of command-line arguments (sys.argv[1:]-style).
    """
    import sys
    import argparse
    import pysam
    import re

    import datetime
    def nicetime(): return datetime.datetime.now().strftime("[bconv %Y-%m-%d %H:%M:%S]")


    #######################################
    # arguments, filehandles

    # NOTE(review): argparse's version= keyword and samfile.next() below are
    # Python 2 era idioms (version= raises TypeError on Python 3); confirm this
    # module still targets Python 2.
    parser = argparse.ArgumentParser(prog="methylCtools bconv", version="0.9.4", description="re-converts reads to original state following bwa alignment")
    parser.add_argument("-s", "--silent", dest="qf", action="store_false", help="do not show status messages")
    parser.add_argument("-m", "--metrics", metavar="metrics.txt", dest="outMETRICS", action="store", default=False, help="write metrics output")

    groupinput = parser.add_argument_group("input files, required")
    groupinput.add_argument("inBAM", metavar="aln.conv.bam", action="store", default=False, help="bwa alignment (unsorted), \"-\" for stdin")

    groupoutput = parser.add_argument_group("output files, will be created")
    groupoutput.add_argument("outBAM", metavar="aln.bam", action="store", default=False, help="converted alignment, \"-\" for stdout")

    parser.add_argument("-u", "--maxunconv", metavar="INT", dest="maxnCGc", type=int, default=False, action="store", help="set reads containing > INT non-CG unconverted cytosines qc-fail [off]")

    args = parser.parse_args(sysargv)

    try:
        samfileIN = pysam.Samfile(args.inBAM, "rb")

        h = samfileIN.header # construct new header (merge alignments from Watson and Crick strand of same contig)
        hseqnew = []
        for tid in range(len(h["SQ"])): # header should be in the order of reference file generated by faconv, otherwise exit
            # Even tids are Watson ("_W"), odd tids are Crick ("_C"); only the
            # Watson entry is kept, with its strand suffix stripped.
            if tid%2==0:
                if h["SQ"][tid]["SN"][-2:] != "_W": sys.exit("methylCtools bconv: error: invalid BAM header")
                hseqnew.append(h["SQ"][tid])
                hseqnew[-1]["SN"] = h["SQ"][tid]["SN"][:-2]
            else:
                if h["SQ"][tid]["SN"][-2:] != "_C": sys.exit("methylCtools bconv: error: invalid BAM header")
        h["SQ"] = hseqnew

        if args.outBAM == "-":
            # "wbu": uncompressed BAM when streaming to stdout.
            samfileOUT = pysam.Samfile(args.outBAM, "wbu", header=h)
        else:
            samfileOUT = pysam.Samfile(args.outBAM, "wb", header=h)
        if args.outMETRICS:
            metricsOUT = open(args.outMETRICS, "w")

    except IOError as strerror:
        sys.exit("methylCtools bconv: error: %s" % strerror)

    if args.qf: sys.stderr.write("%s command: %s\n" % (nicetime(), " ".join(sys.argv)))


    #######################################
    # define functions

    def setQCfail(read):
        # Mark a read QC-fail (flag +512) exactly once and count it in c[6].
        if not read.is_qcfail:
            read.flag += 512
            c[6] += 1
        return read

    def convert(p, s, r): # p: conversion positions, s: sequence, r: is_reverse
        # Decode the run-length encoded conversion string stored in the read id
        # and restore T->C / A->G at each recorded position.
        pp = re.split("[C,G]", p)
        if r: pp.reverse()
        pi = 0
        for i in pp[:-1]:
            if i == "": i = 0
            else: i = int(i)
            pi += i+1
            if s[pi-1] == "T": s = s[:pi-1] + "C" + s[pi:]
            elif s[pi-1] == "A": s = s[:pi-1] + "G" + s[pi:]
            else: sys.exit("methylCtools bconv: error: unexpected conversion")
        return s

    def changetid(t):
        # Map Watson/Crick contig ids (0,1 -> 0; 2,3 -> 1; ...) to the merged
        # header; negative ids (unmapped) pass through unchanged.
        # NOTE(review): t/2 yields a float under Python 3; this relies on
        # Python 2 integer division.
        if t >= 0:
            if t%2 == 0: return t/2
            else: return (t-1)/2
        else: return t


    #######################################
    # main

    if args.qf: sys.stderr.write("%s start: processing alignments\n" % nicetime())
    c = [0] *9 # [reads, paired, mapped, qcfail, proper, singleton, illegal, nCGc, isize<readlen]
    isized = [0] *501 # insert size distribution
    tidd = [0] *len(hseqnew)

    while 1:
        try:
            read1 = samfileIN.next()
            while read1.is_secondary: read1 = samfileIN.next() # do not process secondary alignments (bwa-mem)
        except StopIteration: break

        if read1.is_paired:
            ### PE read
            try:
                read2 = samfileIN.next()
                while read2.is_secondary: read2 = samfileIN.next()
            except StopIteration: break

            # convert
            read1Q, read2Q = read1.qname.split("."), read2.qname.split(".")
            read1H = read1Q[1]
            if len(read2Q) == 3: read2H = read2Q[2]
            else: read2H = read2Q[1]
            if read1Q[0] != read2Q[0]: sys.exit("methylCtools bconv: error: input read order: %s, %s" % (read1Q[0], read2Q[0]))
            read1.qname, read2.qname = read1Q[0], read2Q[0] # restore original ID
            read1qual, read2qual = read1.qual, read2.qual # pysam "bug": qual is reset when seq is changed
            read1.seq = convert(read1H, read1.seq, read1.is_reverse)
            read2.seq = convert(read2H, read2.seq, read2.is_reverse)
            read1.qual, read2.qual = read1qual, read2qual

            # check for illegal mapping
            # (assumes a strand-specific protocol: read1 must map forward on
            # Watson / reverse on Crick; comments give the expected SAM flags)
            for read in [read1, read2]:
                if not read.is_unmapped:
                    if read.tid%2 == 0:
                        if read.is_read1: # 99
                            if read.is_reverse:
                                setQCfail(read1), setQCfail(read2)
                        else: # 147
                            if not read.is_reverse:
                                setQCfail(read1), setQCfail(read2)
                    else:
                        if read.is_read1: # 83
                            if not read.is_reverse:
                                setQCfail(read1), setQCfail(read2)
                        else:
                            if read.is_reverse: # 163
                                setQCfail(read1), setQCfail(read2)

            # check for unconverted non-CpGs
            # (read orientation decides whether C->T or G->A context is counted)
            if not type(args.maxnCGc) == type(False):
                nCGc1, nCGc2 = 0, 0
                if not read1.is_unmapped:
                    if read1.is_read1 != read1.is_reverse: nCGc1 = len([s2 for s2 in re.findall("C[A,C,T]", read1.query)])
                    else: nCGc1 = len([s2 for s2 in re.findall("[A,G,T]G", read1.query)])
                if not read2.is_unmapped:
                    if read2.is_read1 != read2.is_reverse: nCGc2 = len([s2 for s2 in re.findall("C[A,C,T]", read2.query)])
                    else: nCGc2 = len([s2 for s2 in re.findall("[A,G,T]G", read2.query)])
                if nCGc1 > args.maxnCGc or nCGc2 > args.maxnCGc:
                    # fail both mates of the pair
                    if not read1.is_qcfail: read1.flag += 512
                    if not read2.is_qcfail: read2.flag += 512
                    c[7] += 2

            # change tid
            read1.tid = changetid(read1.tid)
            read1.mrnm = changetid(read1.mrnm)
            read2.tid = changetid(read2.tid)
            read2.mrnm = changetid(read2.mrnm)

            # write
            samfileOUT.write(read1)
            samfileOUT.write(read2)

            # metrics
            c[0] += 2 # reads
            if args.outMETRICS:
                c[1] += 2 # paired
                for read in [read1, read2]:
                    if read.is_proper_pair:
                        c[4] += 1 # proper pair
                        c[2] += 1
                        tidd[read.tid] += 1
                    elif not read.is_unmapped:
                        c[2] += 1 # mapped
                        if read.mate_is_unmapped: c[5] += 1 # singleton
                        elif read.tid == read.mrnm and read.pos == read.mpos: c[8] += 1 # isize < read length
                        tidd[read.tid] += 1
                    if not read.isize == 0 and abs(read.isize) <= 500:
                        isized[abs(read.isize)] += 1 # isize
                    if read.is_qcfail: c[3] += 1 # qcfail
            if args.qf and c[0]%10**6 <= 1: sys.stderr.write("%s status: %i alignments processed\n" % (nicetime(), c[0]))

        else:
            ### SE read
            if read1.is_secondary: continue

            # convert
            read1Q = read1.qname.split(".")
            read1H = read1Q[1]
            read1.qname = read1Q[0] # restore original ID
            read1qual = read1.qual # pysam "bug": qual is reset when seq is changed
            read1.seq = convert(read1H, read1.seq, read1.is_reverse)
            read1.qual = read1qual

            # check for illegal mapping
            if not read1.is_unmapped:
                if read1.tid%2 == 0:
                    if read1.is_reverse: # 0
                        setQCfail(read1)
                else:
                    if not read1.is_reverse: # 16
                        setQCfail(read1)

            # check for unconverted non-CpGs
            if not type(args.maxnCGc) == type(False):
                nCGc1 = 0
                if not read1.is_unmapped:
                    if not read1.is_reverse: nCGc1 = len([s2 for s2 in re.findall("C[A,C,T]", read1.query)])
                    else: nCGc1 = len([s2 for s2 in re.findall("[A,G,T]G", read1.query)])
                if nCGc1 > args.maxnCGc:
                    if not read1.is_qcfail: read1.flag += 512
                    c[7] += 1

            # change tid
            read1.tid = changetid(read1.tid)
            read1.mrnm = changetid(read1.mrnm)

            # write
            samfileOUT.write(read1)

            # metrics
            c[0] += 1 # reads
            if args.outMETRICS:
                if not read1.is_unmapped:
                    c[2] += 1 # mapped
                    tidd[read1.tid] += 1 # tid
                if read1.is_qcfail: c[3] += 1 # qcfail
            if args.qf and c[0]%10**6 == 0: sys.stderr.write("%s status: %i alignments processed\n" % (nicetime(), c[0]))


    #######################################
    # end

    if args.outMETRICS:
        metricsOUT.write("# flag statistics\n")
        metricsOUT.write("%10i in total\n" % c[0])
        metricsOUT.write("%10i paired in sequencing\n\n" % c[1])
        metricsOUT.write("%10i mapped (%.2f%%)\n" % (c[2], c[2]/float(c[0])*100))
        metricsOUT.write("%10i properly paired (%.2f%%)\n" % (c[4], c[4]/float(c[0])*100))
        metricsOUT.write("%10i isize smaller read length (%.2f%%)\n" % (c[8], c[8]/float(c[0])*100))
        metricsOUT.write("%10i singletons (%.2f%%)\n\n" % (c[5], c[5]/float(c[0])*100))
        if not type(args.maxnCGc) == type(False):
            metricsOUT.write("%10i QC-fail (%.2f%%)\n" % (c[3], c[3]/float(c[0])*100))
            metricsOUT.write("%10i illegally mapped (%.2f%%)\n" % (c[6], c[6]/float(c[0])*100))
            metricsOUT.write("%10i >%i non-CG Cs (%.2f%%)\n" % (c[7], args.maxnCGc, c[7]/float(c[0])*100))
        else:
            metricsOUT.write("%10i illegally mapped (%.2f%%)\n" % (c[3], c[3]/float(c[0])*100))

        metricsOUT.write("\n# mapped reads per contig\n")
        for tid in range(len(tidd)):
            metricsOUT.write("%10i %s (%.2f%%)\n" % (tidd[tid], samfileOUT.getrname(tid), (tidd[tid]/float(c[2])*100)))

        metricsOUT.write("\n# insert size distribution\n")
        for isize in range(len(isized)):
            metricsOUT.write("%10i %i\n" % (isized[isize], isize))

        metricsOUT.close()
    samfileIN.close()
    samfileOUT.close()

    if args.qf: sys.stderr.write("%s end: %i alignments processed\n" % (nicetime(), c[0]))
# Entry point: forward command-line arguments (minus the program name).
if __name__ == "__main__":
    import sys
    mod_bconv(sys.argv[1:])
| 35.180887
| 191
| 0.606907
|
4a0642e0d9d0c6b5e1f4e2e8c133cfe0b67e21cd
| 1,198
|
py
|
Python
|
code/crawler/crawl_book_content.py
|
CoolPhilChen/SE305_DBST_Project
|
9a7c58695783a5035e1c1c6b83384701ba702673
|
[
"MIT"
] | 6
|
2019-04-10T01:19:57.000Z
|
2020-02-24T04:37:54.000Z
|
code/crawler/crawl_book_content.py
|
CoolPhilChen/SE305_DBST_Project
|
9a7c58695783a5035e1c1c6b83384701ba702673
|
[
"MIT"
] | null | null | null |
code/crawler/crawl_book_content.py
|
CoolPhilChen/SE305_DBST_Project
|
9a7c58695783a5035e1c1c6b83384701ba702673
|
[
"MIT"
] | null | null | null |
import urllib.request, queue, time
import threading
class ThreadManager():
    """Download Project Gutenberg book pages concurrently.

    Spawns ``thread_num`` worker threads that pull book numbers from a shared
    queue, fetch each book's HTML page and save it under ``online_html/``.
    ``end_flag`` becomes True only after *all* workers have finished.
    """

    def __init__(self, thread_num):
        self.lock = threading.Lock()
        self.to_crawl = queue.Queue()
        self.max_count_num = 59000  # crawl book ids 0 .. 58999
        self.end_flag = False
        # Number of workers still running; guarded by self.lock.
        self._running = thread_num
        for i in range(self.max_count_num):
            self.to_crawl.put(i)
        for i in range(thread_num):
            t = threading.Thread(target=self.working)
            t.start()

    def working(self):
        """Worker loop: download queued book numbers until the queue drains."""
        while not self.to_crawl.empty():
            num = self.to_crawl.get()
            if num % 100 == 0:
                print("handled to : ", num)
            url = ("https://www.gutenberg.org/files/" + str(num) + "/"
                   + str(num) + "-h/" + str(num) + "-h.htm")
            try:
                # Close the HTTP response deterministically instead of leaking it.
                with urllib.request.urlopen(url) as resp:
                    html = resp.read()
                with open('online_html/' + str(num) + ".html", 'wb') as f:
                    f.write(html)
            except Exception:
                # Missing books / network errors are expected; skip them.
                pass
        # Bug fix: the original set end_flag as soon as the FIRST worker saw an
        # empty queue, letting the main thread exit while other workers were
        # still downloading.  Signal completion only once every worker is done.
        with self.lock:
            self._running -= 1
            if self._running == 0:
                self.end_flag = True
# Kick off the crawl with 100 worker threads, then poll every 10 seconds
# until the workers report completion via end_flag.
t = ThreadManager(100)
while not t.end_flag:
    time.sleep(10)
| 24.958333
| 110
| 0.496661
|
4a0643cfbcffe2a4d1bd4e43477fd4efbf6fead0
| 2,555
|
py
|
Python
|
pdm/cli/commands/search.py
|
nasyxx/pdm
|
229e4552c800db0241d9d6e9140256c4bcce2a6d
|
[
"MIT"
] | null | null | null |
pdm/cli/commands/search.py
|
nasyxx/pdm
|
229e4552c800db0241d9d6e9140256c4bcce2a6d
|
[
"MIT"
] | null | null | null |
pdm/cli/commands/search.py
|
nasyxx/pdm
|
229e4552c800db0241d9d6e9140256c4bcce2a6d
|
[
"MIT"
] | null | null | null |
import argparse
import sys
import textwrap
from shutil import get_terminal_size
from typing import Optional
from pip._vendor.pkg_resources import safe_name
from pdm import termui
from pdm._types import SearchResult
from pdm.cli.commands.base import BaseCommand
from pdm.cli.options import verbose_option
from pdm.models.environment import WorkingSet
from pdm.project import Project
def print_results(
    ui: termui.UI,
    hits: SearchResult,
    working_set: WorkingSet,
    terminal_width: Optional[int] = None,
) -> None:
    """Render search hits, flagging packages already installed in *working_set*."""
    if not hits:
        return

    # Width of the "name (version)" column, padded so summaries line up.
    name_column_width = max(len(hit.name) + len(hit.version or "") for hit in hits) + 4

    for hit in hits:
        pkg_name = hit.name
        latest = hit.version or ""
        summary = hit.summary or ""

        if terminal_width is not None:
            summary_width = terminal_width - name_column_width - 5
            if summary_width > 10:
                # Wrap the summary and hang continuation lines under the column.
                hanging_indent = "\n" + " " * (name_column_width + 2)
                summary = hanging_indent.join(textwrap.wrap(summary, summary_width))

        used_width = len(pkg_name) + len(latest) + 4
        padding = " " * (name_column_width - used_width)
        line = "{name} ({latest}){spaces} - {summary}".format(
            name=termui.green(pkg_name, bold=True),
            latest=termui.yellow(latest),
            spaces=padding,
            summary=summary,
        )
        try:
            ui.echo(line)
            normalized = safe_name(pkg_name).lower()
            if normalized in working_set:
                dist = working_set[normalized]
                if dist.version == latest:
                    ui.echo(" INSTALLED: %s (latest)" % dist.version)
                else:
                    ui.echo(" INSTALLED: %s" % dist.version)
                    ui.echo(" LATEST: %s" % latest)
        except UnicodeEncodeError:
            # Some terminals cannot encode the styled output; skip this entry.
            pass
class Command(BaseCommand):
    """Search for PyPI packages"""

    arguments = [verbose_option]

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        # Single positional argument forwarded to the repository search.
        parser.add_argument("query", help="Query string to search")

    def handle(self, project: Project, options: argparse.Namespace) -> None:
        hits = project.get_repository().search(options.query)
        # Only constrain the output width when attached to a real terminal.
        width = get_terminal_size()[0] if sys.stdout.isatty() else None
        print_results(
            project.core.ui,
            hits,
            project.environment.get_working_set(),
            width,
        )
| 32.75641
| 87
| 0.6
|
4a0643d06bae049941018a32c73e9af16e54ff3e
| 4,559
|
py
|
Python
|
samples/2D_mixing_bubble/dim_case.py
|
henryleberre/MFC-develop
|
59e94af2d4304b41c9b52280dfd99a300e2664ec
|
[
"MIT"
] | 2
|
2022-02-26T16:00:42.000Z
|
2022-02-28T23:25:06.000Z
|
samples/2D_mixing_bubble/dim_case.py
|
henryleberre/MFC-develop
|
59e94af2d4304b41c9b52280dfd99a300e2664ec
|
[
"MIT"
] | 44
|
2021-12-07T04:36:24.000Z
|
2022-03-30T16:29:00.000Z
|
samples/2D_mixing_bubble/dim_case.py
|
henryleberre/MFC
|
0638f7766ead02a69c386d2acd25af8b85f7d194
|
[
"MIT"
] | 2
|
2021-12-07T18:59:08.000Z
|
2021-12-07T19:37:14.000Z
|
#!/usr/bin/env python2
# MFC sample case (2D mixing bubble): emits the case dictionary as JSON on stdout.
import json

# Configuring case dictionary
print(json.dumps({
    # Logistics ================================================
    'case_dir' : '\'.\'',
    'run_time_info' : 'T',
    # ==========================================================

    # Computational Domain Parameters ==========================
    'x_domain%beg' : -0.500000000000000E+00,
    'x_domain%end' : 0.500000000000000E+00,
    'y_domain%beg' : -0.250000000000000E+00,
    'y_domain%end' : 0.250000000000000E+00,
    'm' : 319,
    'n' : 159,
    'p' : 0,
    'dt' : 10.000000000000000E-7,
    't_step_start' : 0,
    't_step_stop' : 4E6,
    't_step_save' : 1E4,
    # ==========================================================

    # Simulation Algorithm Parameters ==========================
    'model_eqns' : 2,
    'num_fluids' : 2,
    'num_patches' : 2,
    'adv_alphan' : 'T',
    'mpp_lim' : 'T',
    'anti_diffusion' : 'F',
    'mixture_err' : 'T',
    'time_stepper' : 3,
    'weno_vars' : 2,
    'weno_order' : 5,
    'weno_eps' : 1.00000000000000E-16,
    'mapped_weno' :'T',
    'riemann_solver' : 2,
    'wave_speeds' : 1,
    'avg_state' : 2,
    'bc_x%beg' :-1,
    'bc_x%end' :-1,
    'bc_y%beg' :-5,
    'bc_y%end' :-5,
    # ==========================================================

    # Formatted Database Files Structure Parameters ============
    'format' : 1,
    'precision' : 2,
    'prim_vars_wrt' :'T',
    'parallel_io' :'T',
    # ==========================================================

    # Patch 1: initialize entire domain ========================
    # NOTE(review): this section sets patch_icpp(2) although the comment says
    # "Patch 1" — presumably patch 2 is the background; confirm the numbering.
    'patch_icpp(2)%geometry' : 3,
    'patch_icpp(2)%x_centroid' : 0.00000000000000E+00,
    'patch_icpp(2)%y_centroid' : 0.00000000000000E+00,
    'patch_icpp(2)%length_x' : 2.00000000000000E+00,
    'patch_icpp(2)%length_y' : 2.00000000000000E+00,
    'patch_icpp(2)%vel(1)' : 1.000000000000E+00,
    'patch_icpp(2)%vel(2)' : 0.00000000000000E+00,
    'patch_icpp(2)%pres' : 1.01325000000000E+05,
    'patch_icpp(2)%alpha_rho(1)' : 1000.00000000000E+00,
    'patch_icpp(2)%alpha_rho(2)' : 1000.*1E-12,
    'patch_icpp(2)%alpha(1)' : 1.00000000000000E+00-1.E-12,
    'patch_icpp(2)%alpha(2)' : 1.E-12,
    # ==========================================================

    # Patch 2: overwrite lower half plane ======================
    # NOTE(review): this section sets patch_icpp(1) — see the note above.
    'patch_icpp(1)%geometry' : 4,
    'patch_icpp(1)%x_centroid' : 0.00000000000000E+00,
    'patch_icpp(1)%y_centroid' : 0.00000000000000E+00,
    #'patch_icpp(1)%length_x' : 1.00000000000000E+00,
    #'patch_icpp(1)%length_y' : 0.50000000000000E+00,
    'patch_icpp(1)%normal(1)' : 0.00624987793326E+00,
    'patch_icpp(1)%normal(2)' :-0.99998046932219E+00,
    #'patch_icpp(1)%smooth_patch_id': 1,
    #'patch_icpp(1)%smooth_coeff' : 1.00000000000000E+00,
    'patch_icpp(1)%vel(1)' :-1.000000000000E+00,
    'patch_icpp(1)%vel(2)' : 0.00000000000000E+00,
    'patch_icpp(1)%pres' : 1.01325000000000E+05,
    'patch_icpp(1)%alpha_rho(1)' : 1000*1.E-12,
    'patch_icpp(1)%alpha_rho(2)' : 1000.000000000000E+00,
    'patch_icpp(1)%alpha(1)' : 1.00000000000000E-12,
    'patch_icpp(1)%alpha(2)' : 1-1.00000000000000E-12,
    # ==========================================================

    # Fluids Physical Parameters ===============================
    'fluid_pp(1)%gamma' : 1.0E+00/(4.4E+00-1.0E+00),
    'fluid_pp(1)%pi_inf' : 4.4E+00*6.0E+08/(4.4E+00-1.E+00),
    'fluid_pp(2)%gamma' : 1.0E+00/(4.4E+00-1.0E+00),
    'fluid_pp(2)%pi_inf' : 4.4E+00*6.0E+08/(4.4E+00-1.E+00),
    # ==========================================================
}))

# ==============================================================================
| 46.520408
| 80
| 0.395701
|
4a064452a0c1b467e6c8f2c6f9c5585c26ff7224
| 637
|
py
|
Python
|
test-runner/adapters/rest/generated/e2erestapi/__init__.py
|
brycewang-microsoft/iot-sdks-e2e-fx
|
211c9c2615a82076bda02a27152d67366755edbf
|
[
"MIT"
] | 12
|
2019-02-02T00:15:13.000Z
|
2022-02-08T18:20:08.000Z
|
test-runner/adapters/rest/generated/e2erestapi/__init__.py
|
brycewang-microsoft/iot-sdks-e2e-fx
|
211c9c2615a82076bda02a27152d67366755edbf
|
[
"MIT"
] | 36
|
2019-02-14T22:53:17.000Z
|
2022-03-22T22:41:38.000Z
|
test-runner/adapters/rest/generated/e2erestapi/__init__.py
|
brycewang-microsoft/iot-sdks-e2e-fx
|
211c9c2615a82076bda02a27152d67366755edbf
|
[
"MIT"
] | 12
|
2019-02-19T13:28:25.000Z
|
2022-02-08T18:20:55.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._configuration import AzureIOTEndToEndTestWrapperRestApiConfiguration
from ._azure_iot_end_to_end_test_wrapper_rest_api import AzureIOTEndToEndTestWrapperRestApi
__all__ = ['AzureIOTEndToEndTestWrapperRestApi', 'AzureIOTEndToEndTestWrapperRestApiConfiguration']
from .version import VERSION
__version__ = VERSION
| 39.8125
| 99
| 0.645212
|
4a0644ee570af0e1d8978a3d4e7f692eee4e59ef
| 442
|
py
|
Python
|
src/evidently/analyzers/stattests/__init__.py
|
Tapot/evidently
|
ab9b91425d622566b663565508dd1c43e741f515
|
[
"Apache-2.0"
] | null | null | null |
src/evidently/analyzers/stattests/__init__.py
|
Tapot/evidently
|
ab9b91425d622566b663565508dd1c43e741f515
|
[
"Apache-2.0"
] | null | null | null |
src/evidently/analyzers/stattests/__init__.py
|
Tapot/evidently
|
ab9b91425d622566b663565508dd1c43e741f515
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from .ks_stattest import ks_stat_test
from .z_stattest import z_stat_test
from .chisquare_stattest import chi_stat_test
from .jensenshannon import jensenshannon_stat_test
from .kl_div import kl_div_stat_test
from .psi import psi_stat_test
from .wasserstein_distance_norm import wasserstein_stat_test
from .registry import get_stattest, register_stattest, StatTest, PossibleStatTestType, StatTestFuncType
| 40.181818
| 103
| 0.861991
|
4a06456e669b0743e3d9ee55d17880bb85bd0c15
| 15,023
|
py
|
Python
|
openmdao/solvers/linear/direct.py
|
cfe316/OpenMDAO
|
f8881db590d7dc873fb77de2504280c2ed177fbf
|
[
"Apache-2.0"
] | null | null | null |
openmdao/solvers/linear/direct.py
|
cfe316/OpenMDAO
|
f8881db590d7dc873fb77de2504280c2ed177fbf
|
[
"Apache-2.0"
] | 1
|
2022-02-22T21:14:40.000Z
|
2022-02-22T21:14:40.000Z
|
openmdao/solvers/linear/direct.py
|
tadkollar/OpenMDAO
|
f8881db590d7dc873fb77de2504280c2ed177fbf
|
[
"Apache-2.0"
] | null | null | null |
"""LinearSolver that uses linalg.solve or LU factor/solve."""
import warnings
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse import csc_matrix
from openmdao.solvers.solver import LinearSolver
from openmdao.matrices.dense_matrix import DenseMatrix
def index_to_varname(system, loc):
    """
    Given a matrix location, return the name of the variable associated with that index.

    Parameters
    ----------
    system : <System>
        System containing the Directsolver.
    loc : int
        Index of row or column.

    Returns
    -------
    str
        String containing variable absolute name (and promoted name if there is one) and index.
    """
    # Total size of each output variable, summed across processes.
    sizes = np.sum(system._owned_sizes, axis=0)

    offset = upper = 0
    for idx, abs_name in enumerate(system._var_allprocs_abs2meta['output']):
        upper += sizes[idx]
        if loc < upper:
            prom_name = system._var_allprocs_abs2prom['output'][abs_name]
            break
        offset = upper

    local_index = loc - offset
    if prom_name == abs_name:
        return "'{}' index {}.".format(prom_name, local_index)
    return "'{}' ('{}') index {}.".format(prom_name, abs_name, local_index)
def loc_to_error_msg(system, loc_txt, loc):
    """
    Given a matrix location, format a coherent error message when matrix is singular.

    Parameters
    ----------
    system : <System>
        System containing the Directsolver.
    loc_txt : str
        Either 'row' or 'col'.
    loc : int
        Index of row or column.

    Returns
    -------
    str
        New error string.
    """
    entry = index_to_varname(system, loc)
    template = ("Singular entry found in {} for {} associated with state/residual "
                + entry)
    return template.format(system.msginfo, loc_txt)
def format_singular_error(system, matrix):
    """
    Format a coherent error message for any ill-conditioned matrix.

    Parameters
    ----------
    system : <System>
        System containing the Directsolver.
    matrix : ndarray
        Matrix of interest.

    Returns
    -------
    str
        New error string.
    """
    if scipy.sparse.issparse(matrix):
        matrix = matrix.toarray()

    # A NaN anywhere gets its own, more specific report.
    if np.any(np.isnan(matrix)):
        return format_nan_error(system, matrix)

    empty_rows = np.where(~matrix.any(axis=1))[0]
    empty_cols = np.where(~matrix.any(axis=0))[0]

    if empty_cols.size <= empty_rows.size:

        if empty_rows.size == 0:
            # No all-zero row or column, so some row must be a linear
            # combination of the others.  The left singular vector belonging to
            # the smallest singular value highlights the rows that contribute
            # strongly to the singular subspace.  Note that sometimes extra
            # rows/cols are included in the set; currently there is no good way
            # to pare them down.
            u, _, _ = np.linalg.svd(matrix)
            tol = 1e-15
            contributing = np.where(np.abs(u[:, -1]) > tol)[0]

            msg = "Jacobian in '{}' is not full rank. The following set of states/residuals " + \
                  "contains one or more equations that is a linear combination of the others: \n"
            for loc in contributing:
                msg += ' ' + index_to_varname(system, loc) + '\n'
            if len(contributing) > 2:
                msg += "Note that the problem may be in a single Component."
            return msg.format(system.pathname)

        loc_txt = "row"
        loc = empty_rows[0]
    else:
        loc_txt = "column"
        loc = empty_cols[0]

    return loc_to_error_msg(system, loc_txt, loc)
def format_nan_error(system, matrix):
    """
    Format a coherent error message when the matrix contains NaN.

    Parameters
    ----------
    system : <System>
        System containing the Directsolver.
    matrix : ndarray
        Matrix of interest.

    Returns
    -------
    str
        New error string.
    """
    # Because of how the matrix is built, a NaN in a component makes its whole
    # row NaN, so map each NaN row back to the variable that owns that row.
    sizes = np.sum(system._owned_sizes, axis=0)

    has_nan = np.zeros(matrix.shape[0], dtype=bool)
    has_nan[np.where(np.isnan(matrix))[0]] = True

    bad_vars = []
    lo = hi = 0
    for idx, abs_name in enumerate(system._var_allprocs_abs2meta['output']):
        hi += sizes[idx]
        if np.any(has_nan[lo:hi]):
            bad_vars.append("'%s'" % system._var_allprocs_abs2prom['output'][abs_name])
        lo = hi

    msg = "NaN entries found in {} for rows associated with states/residuals [{}]."
    return msg.format(system.msginfo, ', '.join(bad_vars))
class DirectSolver(LinearSolver):
    """
    LinearSolver that uses linalg.solve or LU factor/solve.

    Parameters
    ----------
    **kwargs : dict
        Options dictionary.
    """

    # Identifier used in solver reporting output.
    SOLVER = 'LN: Direct'

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.
        """
        super()._declare_options()
        self.options.declare('err_on_singular', types=bool, default=True,
                             desc="Raise an error if LU decomposition is singular.")

        # this solver does not iterate
        self.options.undeclare("maxiter")
        self.options.undeclare("err_on_non_converge")

        self.options.undeclare("atol")
        self.options.undeclare("rtol")

        # Use an assembled jacobian by default.
        self.options['assemble_jac'] = True

    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            pointer to the owning system.
        depth : int
            depth of the current system (already incremented).
        """
        super()._setup_solvers(system, depth)
        # Direct factorization is not supported on distributed systems.
        self._disallow_distrib_solve()

    def _linearize_children(self):
        """
        Return a flag that is True when we need to call linearize on our subsystems' solvers.

        Returns
        -------
        boolean
            Flag for indicating child linearization.
        """
        return False

    def _build_mtx(self):
        """
        Assemble a Jacobian matrix by matrix-vector-product with columns of identity.

        Returns
        -------
        ndarray
            Jacobian matrix.
        """
        system = self._system()
        bvec = system._vectors['residual']['linear']
        xvec = system._vectors['output']['linear']

        # First make a backup of the vectors
        b_data = bvec.asarray(copy=True)
        x_data = xvec.asarray(copy=True)

        nmtx = x_data.size
        seed = np.zeros(x_data.size)
        mtx = np.empty((nmtx, nmtx), dtype=b_data.dtype)
        scope_out, scope_in = system._get_scope()

        # Assemble the Jacobian by running the identity matrix through apply_linear
        for i in range(nmtx):
            # set value of x vector to provided value
            # (on the first pass, seed[-1] is zeroed — it is already 0)
            seed[i - 1] = 0.
            seed[i] = 1.
            xvec.set_val(seed)

            # apply linear
            system._apply_linear(self._assembled_jac, self._rel_systems, 'fwd',
                                 scope_out, scope_in)

            # put new value in out_vec
            mtx[:, i] = bvec.asarray()

        # Restore the backed-up vectors
        bvec.set_val(b_data)
        xvec.set_val(x_data)

        return mtx

    def _linearize(self):
        """
        Perform factorization.

        Stores the factorization in self._lu (sparse splu) or self._lup
        (dense lu_factor) for later use by solve().
        """
        system = self._system()
        nproc = system.comm.size

        if self._assembled_jac is not None:

            matrix = self._assembled_jac._int_mtx._matrix

            if matrix is None:
                # this happens if we're not rank 0 when using owned_sizes
                self._lu = self._lup = None

            # Perform dense or sparse lu factorization.
            elif isinstance(matrix, csc_matrix):
                try:
                    self._lu = scipy.sparse.linalg.splu(matrix)
                except RuntimeError as err:
                    # splu reports singularity via RuntimeError; reformat it.
                    if 'exactly singular' in str(err):
                        raise RuntimeError(format_singular_error(system, matrix))
                    else:
                        raise err

            elif isinstance(matrix, np.ndarray):  # dense
                # During LU decomposition, detect singularities and warn user.
                with warnings.catch_warnings():
                    if self.options['err_on_singular']:
                        # lu_factor emits a RuntimeWarning for singular
                        # matrices; escalate it to an error so we can catch it.
                        warnings.simplefilter('error', RuntimeWarning)
                    try:
                        self._lup = scipy.linalg.lu_factor(matrix)
                    except RuntimeWarning as err:
                        raise RuntimeError(format_singular_error(system, matrix))

                    # NaN in matrix.
                    except ValueError as err:
                        raise RuntimeError(format_nan_error(system, matrix))

            # Note: calling scipy.sparse.linalg.splu on a COO actually transposes
            # the matrix during conversion to csc prior to LU decomp, so we can't use COO.
            else:
                raise RuntimeError("Direct solver not implemented for matrix type %s"
                                   " in %s." % (type(self._assembled_jac._int_mtx),
                                                system.msginfo))
        else:
            if nproc > 1:
                raise RuntimeError("DirectSolvers without an assembled jacobian are not supported "
                                   "when running under MPI if comm.size > 1.")

            # No assembled jacobian: build a dense one column-by-column.
            mtx = self._build_mtx()

            # During LU decomposition, detect singularities and warn user.
            with warnings.catch_warnings():

                if self.options['err_on_singular']:
                    warnings.simplefilter('error', RuntimeWarning)

                try:
                    self._lup = scipy.linalg.lu_factor(mtx)

                except RuntimeWarning as err:
                    raise RuntimeError(format_singular_error(system, mtx))

                # NaN in matrix.
                except ValueError as err:
                    raise RuntimeError(format_nan_error(system, mtx))

    def _inverse(self):
        """
        Return the inverse Jacobian.

        This is only used by the Broyden solver when calculating a full model Jacobian. Since it
        is only done for a single RHS, no need for LU.

        Returns
        -------
        ndarray
            Inverse Jacobian.
        """
        system = self._system()
        # NOTE(review): iproc is unused in this method.
        iproc = system.comm.rank
        nproc = system.comm.size

        if self._assembled_jac is not None:

            matrix = self._assembled_jac._int_mtx._matrix

            if matrix is None:
                # This happens if we're not rank 0 and owned_sizes are being used
                sz = np.sum(system._owned_sizes)
                inv_jac = np.zeros((sz, sz))

            # Dense and Sparse matrices have their own inverse method.
            elif isinstance(matrix, np.ndarray):
                # Detect singularities and warn user.
                with warnings.catch_warnings():
                    if self.options['err_on_singular']:
                        warnings.simplefilter('error', RuntimeWarning)
                    try:
                        inv_jac = scipy.linalg.inv(matrix)
                    except RuntimeWarning as err:
                        raise RuntimeError(format_singular_error(system, matrix))

                    # NaN in matrix.
                    except ValueError as err:
                        raise RuntimeError(format_nan_error(system, matrix))

            elif isinstance(matrix, csc_matrix):
                try:
                    inv_jac = scipy.sparse.linalg.inv(matrix)
                except RuntimeError as err:
                    if 'exactly singular' in str(err):
                        raise RuntimeError(format_singular_error(system, matrix))
                    else:
                        raise err

                # to prevent broadcasting errors later, make sure inv_jac is 2D
                # scipy.sparse.linalg.inv returns a shape (1,) array if matrix is shape (1,1)
                if inv_jac.size == 1:
                    inv_jac = inv_jac.reshape((1, 1))
            else:
                raise RuntimeError("Direct solver not implemented for matrix type %s"
                                   " in %s." % (type(matrix), system.msginfo))

        else:
            if nproc > 1:
                raise RuntimeError("BroydenSolvers without an assembled jacobian are not supported "
                                   "when running under MPI if comm.size > 1.")
            mtx = self._build_mtx()

            # During inversion detect singularities and warn user.
            with warnings.catch_warnings():

                if self.options['err_on_singular']:
                    warnings.simplefilter('error', RuntimeWarning)

                try:
                    inv_jac = scipy.linalg.inv(mtx)

                except RuntimeWarning as err:
                    raise RuntimeError(format_singular_error(system, mtx))

                # NaN in matrix.
                except ValueError as err:
                    raise RuntimeError(format_nan_error(system, mtx))

        return inv_jac

    def solve(self, mode, rel_systems=None):
        """
        Run the solver.

        Parameters
        ----------
        mode : str
            'fwd' or 'rev'.
        rel_systems : set of str
            Names of systems relevant to the current solve.
        """
        system = self._system()
        # NOTE(review): iproc and nproc are unused in this method.
        iproc = system.comm.rank
        nproc = system.comm.size

        d_residuals = system._vectors['residual']['linear']
        d_outputs = system._vectors['output']['linear']

        # assign x and b vectors based on mode
        # (in reverse mode the transposed factorization is used)
        if mode == 'fwd':
            x_vec = d_outputs.asarray()
            b_vec = d_residuals.asarray()
            trans_lu = 0
            trans_splu = 'N'
        else: # rev
            x_vec = d_residuals.asarray()
            b_vec = d_outputs.asarray()
            trans_lu = 1
            trans_splu = 'T'

        # AssembledJacobians are unscaled.
        if self._assembled_jac is not None:
            # NOTE(review): the `tmp` alias appears vestigial.
            full_b = tmp = b_vec

            with system._unscaled_context(outputs=[d_outputs], residuals=[d_residuals]):
                if isinstance(self._assembled_jac._int_mtx, DenseMatrix):
                    arr = scipy.linalg.lu_solve(self._lup, full_b, trans=trans_lu)
                else:
                    arr = self._lu.solve(full_b, trans_splu)
                x_vec[:] = arr

        # matrix-vector-product generated jacobians are scaled.
        else:
            x_vec[:] = scipy.linalg.lu_solve(self._lup, b_vec, trans=trans_lu)
| 32.873085
| 100
| 0.562271
|
4a06462527eff991a4802f5949cab5418611cace
| 18,320
|
py
|
Python
|
pyrobolearn/utils/inertia.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 2
|
2021-01-21T21:08:30.000Z
|
2022-03-29T16:45:49.000Z
|
pyrobolearn/utils/inertia.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | null | null | null |
pyrobolearn/utils/inertia.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 1
|
2020-09-29T21:25:39.000Z
|
2020-09-29T21:25:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide functions to process inertia matrices and moments of inertia.
References:
- Moment of inertia (Wikipedia): https://en.wikipedia.org/wiki/Moment_of_inertia
- List of moments of inertia (Wikipedia): https://en.wikipedia.org/wiki/List_of_moments_of_inertia
"""
import numpy as np
from pyrobolearn.utils.transformation import skew_matrix
from pyrobolearn.utils.mesh import get_mesh_body_inertia
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
def get_full_inertia(inertia):
    r"""
    Build the full 3x3 inertia matrix from one of the accepted compact forms.

    Args:
        inertia (np.array[float[3,3]], np.array[float[9]], np.array[float[6]], np.array[float[3]]): body frame
            inertia relative to the center of mass. A length-9 vector is read as the row-major full matrix
            [ixx, ixy, ixz, ixy, iyy, iyz, ixz, iyz, izz]; a length-6 vector as the upper triangle
            [ixx, ixy, ixz, iyy, iyz, izz]; a length-3 vector as the principal moments [ixx, iyy, izz].

    Returns:
        np.array[float[3,3]]: full inertia matrix.

    Raises:
        ValueError: if the input does not match any of the accepted shapes.
    """
    mat = np.asarray(inertia)

    if mat.shape == (3,):
        # principal moments only -> diagonal matrix
        return np.diag(mat)
    if mat.shape == (6,):
        # upper-triangular entries -> symmetric matrix
        xx, xy, xz, yy, yz, zz = mat
        return np.array([[xx, xy, xz],
                         [xy, yy, yz],
                         [xz, yz, zz]])
    if mat.shape == (9,):
        # flattened row-major full matrix
        return mat.reshape(3, 3)
    if mat.shape != (3, 3):
        raise ValueError("Expecting the inertia matrix to be of shape (3,), (6,), (9,) or (3,3), but got a shape of: "
                         "{}".format(mat.shape))
    return mat
def get_principal_moments_and_axes_of_inertia(inertia, full=False):
    r"""
    Get the principal moments and principal axes of inertia.

    The body frame inertia matrix relative to the center of mass :math:`I^B_C` admits the
    eigendecomposition

    .. math:: I^B_C = Q \Sigma Q^T

    where :math:`\Sigma = \text{diag}[I_1, I_2, I_3]` holds the principal moments of inertia
    and the columns of :math:`Q` are the principal axes of the body.

    Args:
        inertia (np.array[float[3,3]], np.array[float[9]], np.array[float[6]], np.array[float[3]]): body frame
            inertia relative to the center of mass, in any of the compact forms accepted by
            :func:`get_full_inertia`.
        full (bool): if True, return the principal moments as a diagonal 3x3 matrix instead of a 3-vector.

    Returns:
        np.array[float[3,3]] or np.array[float[3]]: principal moments of inertia (matrix form if ``full``).
        np.array[float[3,3]]: principal axes of the body (eigenvectors as columns).
    """
    full_inertia = get_full_inertia(inertia)
    # symmetric matrix -> use eigh (real eigenvalues, orthonormal eigenvectors)
    moments, axes = np.linalg.eigh(full_inertia)
    return (np.diag(moments) if full else moments), axes
def get_principal_moments_of_inertia(inertia, full=False):
    r"""
    Get the principal moments of inertia.

    Convenience wrapper around :func:`get_principal_moments_and_axes_of_inertia` that discards
    the principal axes and only returns the principal moments (the eigenvalues of the body
    frame inertia matrix relative to the center of mass).

    Args:
        inertia (np.array[float[3,3]], np.array[float[9]], np.array[float[6]], np.array[float[3]]): body frame
            inertia relative to the center of mass, in any of the compact forms accepted by
            :func:`get_full_inertia`.
        full (bool): if True, return the principal moments as a diagonal 3x3 matrix instead of a 3-vector.

    Returns:
        np.array[float[3,3]] or np.array[float[3]]: principal moments of inertia (matrix form if ``full``).
    """
    moments, _ = get_principal_moments_and_axes_of_inertia(inertia, full)
    return moments
def get_principal_axes_of_inertia(inertia):
    r"""
    Get the principal axes of inertia.

    Given a body frame inertia matrix relative to the center of mass :math:`I^B_C`, it can be
    decomposed using eigendecomposition into:

    .. math:: I^B_C = Q \Sigma Q^T

    where :math:`\Sigma = \text{diag}[I_1, I_2, I_3]` is a diagonal matrix
    :math:`\in \mathbb{R^{3 \times 3}}` whose diagonal elements are the principal moments of
    inertia, and the columns of :math:`Q` are called the principal axes of the body.

    Args:
        inertia (np.array[float[3,3]], np.array[float[9]], np.array[float[6]], np.array[float[3]]): body frame
            inertia matrix relative to the center of mass, in any of the compact forms accepted by
            :func:`get_full_inertia`.

    Returns:
        np.array[float[3,3]]: principal axes of the body inertia (eigenvectors as columns).
    """
    # docstring previously said "Get the principal moments" (copy-paste from the sibling
    # function); this function returns the axes (eigenvectors), not the moments.
    return get_principal_moments_and_axes_of_inertia(inertia, full=False)[1]
def translate_inertia_matrix(inertia, vector, mass):
    r"""
    "The inertia matrix of a body depends on the choice of the reference point. There is a useful relationship between
    the inertia matrix relative to the center of mass C and the inertia matrix relative to the another point. This
    relationship is called the parallel axis theorem" [1].

    The result is given by:

    .. math:: I_R = I_C - M [d]^2

    where :math:`I_R \in \mathbb{R}^{3 \times 3}` is the inertia matrix relative to the point :math:`R`,
    :math:`I_C \in \mathbb{R}^{3 \times 3}` is the inertia matrix relative to the center of mass :math:`C`,
    :math:`M \in \mathbb{R}` is the total mass of the system, :math:`d \in \mathbb{R}^3` is the vector from the
    center of mass :math:`C` to the reference point :math:`R`, and :math:`[\cdot]` is the operation which transforms
    a vector into a skew-symmetric matrix.

    Args:
        inertia (np.array[float[3,3]]): full inertia matrix of a body around its CoM.
        vector (np.array[float[3]]): translation vector from the CoM to another point.
        mass (float): the total mass of the body.

    Returns:
        np.array[float[3,3]]: full inertia matrix of a body relative to another point.

    References:
        - [1] Parallel axis theorem (Wikipedia): https://en.wikipedia.org/wiki/Moment_of_inertia#Parallel_axis_theorem
    """
    d = np.asarray(vector)
    # For a skew-symmetric matrix [d], the matrix square satisfies
    # [d]^2 = d d^T - ||d||^2 E, hence I_R = I_C - M [d]^2 = I_C + M (||d||^2 E - d d^T).
    # BUGFIX: the previous implementation computed `skew(d)**2`, which squares the skew
    # matrix ELEMENT-WISE (numpy `**` is not a matrix power) and therefore did not
    # implement the parallel axis theorem.
    return inertia + mass * (d.dot(d) * np.eye(3) - np.outer(d, d))
def rotate_inertia_matrix(inertia, rotation):
    r"""
    Express a body inertia matrix in a rotated frame.

    Given a rotation matrix :math:`R` mapping body-frame coordinates to inertial-frame
    coordinates (:math:`y = R x`), the inertia in the inertial frame is the similarity
    transform

    .. math:: I_C = R I^B_C R^\top

    where :math:`I^B_C` is the (constant) inertia matrix of the body around its center of
    mass expressed in the body frame, and :math:`I_C` is the same inertia expressed in the
    inertial frame (time-dependent if :math:`R(t)` is).

    Args:
        inertia (np.array[float[3,3]]): full inertia matrix.
        rotation (np.array[float[3,3]]): rotation matrix.

    Returns:
        np.array[float[3,3]]: rotated inertia matrix.
    """
    rotated = np.dot(rotation, np.dot(inertia, rotation.T))
    return rotated
def scale_inertia(inertia, scale=1):
    r"""
    Scale an inertia matrix for a change of length unit.

    Since :math:`I \sim m r^2` (inertia in [kg m^2], mass in [kg], distance in [m]),
    scaling all lengths by a factor :math:`s` scales the inertia by :math:`s^2`.
    E.g. going from meters to millimeters (:math:`s = 10^{-3}`) scales the inertia
    by :math:`10^{-6}`.

    Args:
        inertia: inertia matrix (or principal moments) to scale.
        scale (float): scaling factor applied to lengths.

    Returns:
        np.array[float[3,3]]: full scaled inertia matrix.
    """
    # lengths enter the inertia quadratically
    factor = scale ** 2
    return inertia * factor
def get_inertia_of_sphere(mass, radius, full=False):
    r"""
    Principal moments of inertia of a solid sphere: :math:`I = \frac{2}{5} m r^2` on
    every axis.

    Args:
        mass (float): mass of the sphere.
        radius (float): radius of the sphere.
        full (bool): if True, return the diagonal 3x3 inertia matrix instead of a 3-vector.

    Returns:
        np.array[float[3,3]] or np.array[float[3]]: principal moments of inertia
        (diagonal matrix form if ``full``).

    References:
        - List of moments of inertia (Wikipedia): https://en.wikipedia.org/wiki/List_of_moments_of_inertia
    """
    moment = 2. / 5 * mass * radius ** 2
    principal = moment * np.ones(3)
    return np.diag(principal) if full else principal
def get_inertia_of_box(mass, size, full=False):
    r"""
    Principal moments of inertia of a solid box/cuboid:
    :math:`I_x = \frac{1}{12} m (h^2 + d^2)` and cyclic permutations.

    Args:
        mass (float): mass of the box.
        size (np.array[float[3]]): dimensions of the box/cuboid along the 3 axes (width, height, depth).
        full (bool): if True, return the diagonal 3x3 inertia matrix instead of a 3-vector.

    Returns:
        np.array[float[3,3]] or np.array[float[3]]: principal moments of inertia
        (diagonal matrix form if ``full``).

    References:
        - List of moments of inertia (Wikipedia): https://en.wikipedia.org/wiki/List_of_moments_of_inertia
    """
    width, height, depth = size
    # each axis sees the squares of the two orthogonal dimensions
    principal = 1. / 12 * mass * np.array([height ** 2 + depth ** 2,
                                           width ** 2 + depth ** 2,
                                           width ** 2 + height ** 2])
    return np.diag(principal) if full else principal
def get_inertia_of_cylinder(mass, radius, height, full=False):
    r"""
    Return the principal moments of the inertia matrix of a solid cylinder
    (symmetry axis along z).

    Args:
        mass (float): mass of the cylinder.
        radius (float): radius of the cylinder.
        height (float): height of the cylinder.
        full (bool): if we should return the full inertia matrix, or just the diagonal elements.

    Returns:
        if full:
            np.array[float[3,3]]: diagonal inertia matrix where the diagonal elements are the principal moments of
              inertia.
        else:
            np.array[float[3]]: principal moments of inertia.

    References:
        - List of moments of inertia (Wikipedia): https://en.wikipedia.org/wiki/List_of_moments_of_inertia
    """
    r, h = radius, height
    # Ixx = Iyy = 1/12 m (3 r^2 + h^2); Izz = 1/2 m r^2 about the symmetry axis.
    # BUGFIX: the previous version applied the 1/12 factor to Izz as well, giving
    # m r^2 / 12 instead of the correct m r^2 / 2 for a solid cylinder.
    ixx = 1. / 12 * mass * (3 * r ** 2 + h ** 2)
    izz = 1. / 2 * mass * r ** 2
    inertia = np.array([ixx, ixx, izz])
    if full:
        return np.diag(inertia)
    return inertia
def get_inertia_of_capsule(mass, radius, height, full=False):
    r"""
    Return the principal moments of the inertia matrix of a capsule
    (cylinder of height ``height`` capped by two hemispheres of radius ``radius``,
    symmetry axis along z).

    Args:
        mass (float): mass of the capsule.
        radius (float): radius of the capsule (i.e. radius of the hemispheres).
        height (float): height of the cylindrical part of the capsule.
        full (bool): if we should return the full inertia matrix, or just the diagonal elements.

    Returns:
        if full:
            np.array[float[3,3]]: diagonal inertia matrix where the diagonal elements are the principal moments of
              inertia.
        else:
            np.array[float[3]]: principal moments of inertia.

    References:
        - https://www.gamedev.net/articles/programming/math-and-physics/capsule-inertia-tensor-r3856/
    """
    r, h = radius, height

    # split the total mass between the cylinder and the two hemispherical caps by volume
    sphere_volume = 4. / 3 * np.pi * r ** 3
    cylinder_volume = np.pi * r ** 2 * h
    volume = sphere_volume + cylinder_volume
    density = mass / volume
    m_s = density * sphere_volume    # combined mass of the two hemispherical caps
    m_c = density * cylinder_volume  # cylinder mass

    # Transverse moments (see the gamedev.net derivation):
    #   Ixx = Iyy = m_c (h^2/12 + r^2/4) + m_s (2 r^2/5 + h^2/4 + 3 h r/8)
    # BUGFIX: the previous version used h^2/2 in the hemisphere term; shifting each
    # hemisphere (mass m_s/2) by h/2 via the parallel axis theorem contributes
    # 2 * (m_s/2) * (h/2)^2 = m_s h^2 / 4, not m_s h^2 / 2.
    ixx = m_c * (h ** 2 / 12. + r ** 2 / 4.) + m_s * (2 * r ** 2 / 5. + h ** 2 / 4. + 3 * h * r / 8.)
    iyy = ixx
    # about the symmetry axis: cylinder m r^2/2 plus spherical caps 2/5 m r^2
    izz = m_c * r ** 2 / 2. + m_s * 2 * r ** 2 / 5.

    inertia = np.array([ixx, iyy, izz])
    if full:
        return np.diag(inertia)
    return inertia
def get_inertia_of_ellipsoid(mass, a, b, c, full=False):
    r"""
    Principal moments of inertia of a solid ellipsoid with semi-axes
    :math:`a, b, c`: :math:`I_x = \frac{1}{5} m (b^2 + c^2)` and cyclic permutations.

    Args:
        mass (float): mass of the ellipsoid.
        a (float): first semi-axis of the ellipsoid.
        b (float): second semi-axis of the ellipsoid.
        c (float): third semi-axis of the ellipsoid.
        full (bool): if True, return the diagonal 3x3 inertia matrix instead of a 3-vector.

    Returns:
        np.array[float[3,3]] or np.array[float[3]]: principal moments of inertia
        (diagonal matrix form if ``full``).

    References:
        - List of moments of inertia (Wikipedia): https://en.wikipedia.org/wiki/List_of_moments_of_inertia
    """
    principal = 1. / 5 * mass * np.array([b ** 2 + c ** 2, a ** 2 + c ** 2, a ** 2 + b ** 2])
    return np.diag(principal) if full else principal
def get_inertia_of_mesh(mesh, mass=None, scale=1., density=1000, full=False):
    r"""
    Principal moments of inertia of a (watertight) mesh, delegated to
    :func:`get_mesh_body_inertia`.

    Warnings: the mesh has to be watertight.

    Args:
        mesh (str, trimesh.Trimesh): path to the mesh file, or a Trimesh instance. Note that the mesh has to be
            watertight.
        mass (float, None): mass of the mesh (in kg). If None, it will use the density.
        scale (float): scaling factor. If you have a mesh in meter but you want to scale it into centimeters, you need
            to provide a scaling factor of 0.01.
        density (float): density of the mesh (in kg/m^3). By default, it uses the density of the water 1000kg / m^3.
        full (bool): if True, return the diagonal 3x3 inertia matrix instead of a 3-vector.

    Returns:
        np.array[float[3,3]] or np.array[float[3]]: principal moments of inertia
        (diagonal matrix form if ``full``).
    """
    principal = get_mesh_body_inertia(mesh, mass=mass, density=density, scale=scale)
    return np.diag(principal) if full else principal
def combine_inertias(coms, masses, inertias, rotations=None):
    r"""
    Combine several bodies into one: compute the total mass, the combined center of mass,
    and the combined body frame inertia matrix relative to that combined center of mass.

    Args:
        coms (list[np.array[float[3]]): list of center of masses.
        masses (list[float]): list of total body masses.
        inertias (list[np.array[float[3,3]]]): list of body frame inertia matrices relative to their center of mass.
        rotations (list[np.array[float[3,3]]]): list of rotation matrices where each rotation has to be applied on
            the inertia matrix before translating it.

    Returns:
        float: total mass.
        np.array[float[3]]: combined center of mass.
        np.array[float[3,3]]: combined inertia matrix.
    """
    n = len(coms)
    if n != len(masses) or n != len(inertias):
        raise ValueError("The given lists do not have the same length: len(coms)={}, len(masses)={}, "
                         "len(inertias)={}".format(len(coms), len(masses), len(inertias)))
    if n == 0:
        raise ValueError("Expecting the length of the provided parameters to be bigger than 0")
    if rotations is not None and len(rotations) != n:
        raise ValueError("The given list of rotations do not have the same length (={}) as the other arguments (={})"
                         ".".format(len(rotations), len(masses)))

    total_mass = np.sum(masses)
    # mass-weighted average of the individual centers of mass
    new_com = np.sum([m * c for m, c in zip(masses, coms)], axis=0) / total_mass

    # optionally re-express each inertia in the common frame before translating it
    if rotations is None:
        oriented = inertias
    else:
        oriented = [rotate_inertia_matrix(i, rot) for i, rot in zip(inertias, rotations)]

    # move every inertia from its own CoM to the combined CoM (parallel axis) and sum
    inertia = np.sum([translate_inertia_matrix(i, vector=new_com - c, mass=m)
                      for m, c, i in zip(masses, coms, oriented)], axis=0)

    return total_mass, new_com, inertia
| 41.076233
| 118
| 0.646834
|
4a06475597035e67196610eaa758f25ea6c0b272
| 1,368
|
py
|
Python
|
mozbadges/utils/decorators.py
|
Mozilla-GitHub-Standards/32efa1a1fe75882ab357bdb58d92207732a76c86f080bb0f12b4b3357b38899d
|
3b57876dcdf7bf9e8158b69acdb9648a7a401a49
|
[
"BSD-3-Clause"
] | 1
|
2017-01-13T03:56:04.000Z
|
2017-01-13T03:56:04.000Z
|
mozbadges/utils/decorators.py
|
Mozilla-GitHub-Standards/32efa1a1fe75882ab357bdb58d92207732a76c86f080bb0f12b4b3357b38899d
|
3b57876dcdf7bf9e8158b69acdb9648a7a401a49
|
[
"BSD-3-Clause"
] | 10
|
2019-03-29T04:59:00.000Z
|
2022-01-19T14:54:45.000Z
|
mozbadges/utils/decorators.py
|
Mozilla-GitHub-Standards/32efa1a1fe75882ab357bdb58d92207732a76c86f080bb0f12b4b3357b38899d
|
3b57876dcdf7bf9e8158b69acdb9648a7a401a49
|
[
"BSD-3-Clause"
] | 1
|
2019-03-29T04:59:02.000Z
|
2019-03-29T04:59:02.000Z
|
from functools import wraps

from django.http import HttpResponse
from django.shortcuts import render
def public_attributes(*args, **kwargs):
    """Class decorator recording a mapping of public attribute names.

    Positional args map to themselves ({'a': 'a'}); keyword args map to their
    given values ({'b': 'B'}). The mapping is stored on the class as
    ``__public__``.
    """
    # BUGFIX: `list(args) + kwargs.keys()` raises TypeError on Python 3
    # (cannot concatenate a list with a dict_keys view); use list(kwargs).
    attributes = dict([(key, kwargs.get(key, key)) for key in (list(args) + list(kwargs))])

    def decorator(cls):
        cls.__public__ = attributes
        return cls
    return decorator
def render_to(template_name):
    """Decorator: the wrapped view returns a context dict (or None), which is
    rendered with *template_name*; any non-dict return value (e.g. an
    HttpResponse / redirect) is passed through untouched.
    """
    def decorator(fn):
        @wraps(fn)  # preserve the wrapped view's name/doc for debugging and URL introspection
        def view(request, *args, **kwargs):
            response = fn(request, *args, **kwargs)
            if response is None:
                response = {}
            if not isinstance(response, dict):
                # already a full response object -- do not re-render
                return response
            return render(request, template_name, response)
        return view
    return decorator
def requires_user(fn=None, *attrs):
    """Decorator requiring ``request.user`` to have truthy attributes.

    Usable bare (``@requires_user``) or with extra attribute names
    (``@requires_user('is_staff')``). ``is_authenticated`` is always checked.
    Attribute values that are callables are called. On failure, renders the
    ``401.html`` template.
    """
    if fn is not None and not callable(fn):
        # Called as requires_user('attr', ...): the first positional argument is
        # an attribute name, not the function being decorated.
        attrs = (fn,) + attrs
        fn = None
    # BUGFIX: previously `@requires_user()` prepended None to the attribute list,
    # causing a TypeError in getattr() at request time.
    required = attrs + ('is_authenticated',)

    def decorator(fn):
        @wraps(fn)  # preserve the wrapped view's metadata
        def view(request, *args, **kwargs):
            user = request.user
            for attr in required:
                value = getattr(user, attr, False)
                if callable(value):
                    value = value()
                if not value:
                    return render(request, '401.html', {})
            return fn(request, *args, **kwargs)
        return view

    if fn is not None:
        # bare usage: @requires_user
        return decorator(fn)
    return decorator
| 25.811321
| 92
| 0.556287
|
4a0647dec473100aacfcdd16d30c71833b0a1de3
| 2,332
|
py
|
Python
|
datman/yamltools.py
|
gabiherman/datman
|
dcbca4981ff7bb1be536d6c62c3b27786cabdef9
|
[
"Apache-2.0"
] | null | null | null |
datman/yamltools.py
|
gabiherman/datman
|
dcbca4981ff7bb1be536d6c62c3b27786cabdef9
|
[
"Apache-2.0"
] | null | null | null |
datman/yamltools.py
|
gabiherman/datman
|
dcbca4981ff7bb1be536d6c62c3b27786cabdef9
|
[
"Apache-2.0"
] | null | null | null |
"""
Some tools for interacting with YAML files.
"""
import yaml
import sys
import collections
def load_yaml(filename):
    """
    Attempts to load a YAML file. Complains and exits if it fails.

    Args:
        filename: path to the YAML configuration file.

    Returns:
        The parsed YAML document (typically a dict).
    """
    try:
        with open(filename, 'r') as stream:
            # safe_load: yaml.load without an explicit Loader is deprecated and
            # can execute arbitrary python object tags from untrusted files.
            # NOTE(review): assumes no custom yaml tags are used in these
            # configuration files -- confirm before relying on this.
            data = yaml.safe_load(stream)
    except (IOError, yaml.YAMLError):
        # narrow exceptions: a bare `except:` also swallowed SystemExit et al.
        print("ERROR: Supplied configuration file {} is not a properly-formatted YAML file.".format(filename))
        sys.exit(1)
    return data
def save_yaml(filename, data):
    """
    Saves a YAML file.

    Args:
        filename: path to write the YAML document to.
        data: the document (typically a dict) to serialize.
    """
    try:
        with open(filename, 'w') as f:
            yaml.dump(data, f, default_flow_style=False)
    except (IOError, OSError):
        # narrow exceptions: the old bare `except:` attributed ANY failure
        # (including serialization errors) to a permissions problem.
        print('ERROR: Do not have permissions to edit submitted YAML file.')
        sys.exit(1)
def blacklist_series(filename, stage, series, message):
    """
    Adds a series to the list of ignored for the defined stage of the pipeline in
    the configuration file. It also appends a diagnostic message to the series.

    Raises KeyError if the 'ignore' section or the stage does not exist
    (see touch_blacklist_stage).
    """
    # teach pyyaml to serialize defaultdicts like plain dicts (global, idempotent);
    # the unused recursive-defaultdict `tree` lambda that used to sit here was dead code.
    yaml.add_representer(collections.defaultdict, yaml.representer.Representer.represent_dict)

    data = load_yaml(filename)
    data['ignore'][stage][series] = message
    save_yaml(filename, data)
def whitelist_series(filename, stage, series):
    """
    Checks if a series in a particular stage is blacklisted. If so, this removes it.

    The file is only rewritten when the series was actually present.
    """
    # teach pyyaml to serialize defaultdicts like plain dicts (global, idempotent);
    # the unused recursive-defaultdict `tree` lambda that used to sit here was dead code.
    yaml.add_representer(collections.defaultdict, yaml.representer.Representer.represent_dict)

    data = load_yaml(filename)
    if series in data['ignore'][stage]:
        data['ignore'][stage].pop(series)
        save_yaml(filename, data)
def list_series(filename, stage):
    """
    Returns all of the series from a stage as a list.
    """
    data = load_yaml(filename)
    # materialize as a real list: on python 3, dict.keys() is a view, while the
    # docstring (and callers that index the result) promise a list.
    return list(data['ignore'][stage].keys())
def touch_blacklist_stage(filename, stage):
    """
    Initializes a stage in the YAML file.

    Creates an empty mapping for the stage under the 'ignore' section when it is
    missing, then rewrites the file.
    """
    data = load_yaml(filename)
    # only create the stage when absent; an existing stage is left untouched
    if stage not in data['ignore']:
        data['ignore'][stage] = {}
    save_yaml(filename, data)
| 29.518987
| 110
| 0.665952
|
4a064849c1b890f1bc121ae00b4f5f7adc6d8a7b
| 32,526
|
py
|
Python
|
synapse/rest/client/v2_alpha/register.py
|
arthurthouzeau/synapse
|
7e460ec2a566b19bbcda63bc04b1e422127a99b3
|
[
"Apache-2.0"
] | 1
|
2021-09-09T08:50:13.000Z
|
2021-09-09T08:50:13.000Z
|
synapse/rest/client/v2_alpha/register.py
|
martindale/synapse
|
0f2629ebc6610971557df5810a9b34d7f07c0077
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/client/v2_alpha/register.py
|
martindale/synapse
|
0f2629ebc6610971557df5810a9b34d7f07c0077
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015 - 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import logging
import random
from typing import List, Union
import synapse
import synapse.api.auth
import synapse.types
from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType
from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
SynapseError,
ThreepidValidationError,
UnrecognizedRequestError,
)
from synapse.config import ConfigError
from synapse.config.captcha import CaptchaConfig
from synapse.config.consent_config import ConsentConfig
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.ratelimiting import FederationRateLimitConfig
from synapse.config.registration import RegistrationConfig
from synapse.config.server import is_threepid_reserved
from synapse.handlers.auth import AuthHandler
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http.server import finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import assert_valid_client_secret, random_string
from synapse.util.threepids import canonicalise_email, check_3pid_allowed
from ._base import client_patterns, interactive_auth_handler
# We ought to be using hmac.compare_digest() but on older pythons it doesn't
# exist. It's a _really minor_ security flaw to use plain string comparison
# because the timing attack is so obscured by all the other code here it's
# unlikely to make much difference
def _plain_compare(a, b):
    # Fallback equality check for old pythons lacking hmac.compare_digest.
    return a == b


# Prefer the constant-time implementation when the stdlib provides it.
compare_digest = getattr(hmac, "compare_digest", _plain_compare)
logger = logging.getLogger(__name__)
class EmailRegisterRequestTokenRestServlet(RestServlet):
    """Requests that a validation token be sent to an email address prior to
    registration.

    Handles ``POST /register/email/requestToken``.
    """

    PATTERNS = client_patterns("/register/email/requestToken$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.hs = hs
        self.identity_handler = hs.get_identity_handler()
        self.config = hs.config

        # Only build a Mailer when this server sends validation email itself;
        # in REMOTE mode the delegate identity server sends it instead.
        if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
            self.mailer = Mailer(
                hs=self.hs,
                app_name=self.config.email_app_name,
                template_html=self.config.email_registration_template_html,
                template_text=self.config.email_registration_template_text,
            )

    async def on_POST(self, request):
        # Reject outright when email handling is disabled on this server.
        if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
            if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
                logger.warning(
                    "Email registration has been disabled due to lack of email config"
                )
            raise SynapseError(
                400, "Email-based registration has been disabled on this server"
            )
        body = parse_json_object_from_request(request)

        assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])

        # Extract params from body
        client_secret = body["client_secret"]
        assert_valid_client_secret(client_secret)

        # For emails, canonicalise the address.
        # We store all email addresses canonicalised in the DB.
        # (See on_POST in EmailThreepidRequestTokenRestServlet
        # in synapse/rest/client/v2_alpha/account.py)
        try:
            email = canonicalise_email(body["email"])
        except ValueError as e:
            raise SynapseError(400, str(e))
        send_attempt = body["send_attempt"]
        next_link = body.get("next_link")  # Optional param

        # Server-side allow/deny list of 3pid domains.
        if not check_3pid_allowed(self.hs, "email", email):
            raise SynapseError(
                403,
                "Your email domain is not authorized to register on this server",
                Codes.THREEPID_DENIED,
            )

        await self.identity_handler.ratelimit_request_token_requests(
            request, "email", email
        )

        existing_user_id = await self.hs.get_datastore().get_user_id_by_threepid(
            "email", email
        )

        if existing_user_id is not None:
            if self.hs.config.request_token_inhibit_3pid_errors:
                # Make the client think the operation succeeded. See the rationale in the
                # comments for request_token_inhibit_3pid_errors.
                # Also wait for some random amount of time between 100ms and 1s to make it
                # look like we did something.
                await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
                return 200, {"sid": random_string(16)}

            raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)

        if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
            assert self.hs.config.account_threepid_delegate_email

            # Have the configured identity server handle the request
            ret = await self.identity_handler.requestEmailToken(
                self.hs.config.account_threepid_delegate_email,
                email,
                client_secret,
                send_attempt,
                next_link,
            )
        else:
            # Send registration emails from Synapse
            sid = await self.identity_handler.send_threepid_validation(
                email,
                client_secret,
                send_attempt,
                self.mailer.send_registration_mail,
                next_link,
            )

            # Wrap the session id in a JSON object
            ret = {"sid": sid}

        # metrics: record token-send attempts per medium/reason
        threepid_send_requests.labels(type="email", reason="register").observe(
            send_attempt
        )

        return 200, ret
class MsisdnRegisterRequestTokenRestServlet(RestServlet):
    """Requests that a validation token be sent to a phone number prior to
    registration.

    Handles ``POST /register/msisdn/requestToken``. SMS delivery is always
    delegated to the configured ``account_threepid_delegate_msisdn`` identity
    server; there is no local SMS-sending path.
    """

    PATTERNS = client_patterns("/register/msisdn/requestToken$")

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.hs = hs
        self.identity_handler = hs.get_identity_handler()

    async def on_POST(self, request):
        body = parse_json_object_from_request(request)

        assert_params_in_dict(
            body, ["client_secret", "country", "phone_number", "send_attempt"]
        )
        client_secret = body["client_secret"]
        assert_valid_client_secret(client_secret)
        country = body["country"]
        phone_number = body["phone_number"]
        send_attempt = body["send_attempt"]
        next_link = body.get("next_link")  # Optional param

        # Canonicalise the country/number pair into an msisdn for storage/lookup.
        msisdn = phone_number_to_msisdn(country, phone_number)

        # Server-side allow/deny list of 3pids.
        if not check_3pid_allowed(self.hs, "msisdn", msisdn):
            raise SynapseError(
                403,
                "Phone numbers are not authorized to register on this server",
                Codes.THREEPID_DENIED,
            )

        await self.identity_handler.ratelimit_request_token_requests(
            request, "msisdn", msisdn
        )

        existing_user_id = await self.hs.get_datastore().get_user_id_by_threepid(
            "msisdn", msisdn
        )

        if existing_user_id is not None:
            if self.hs.config.request_token_inhibit_3pid_errors:
                # Make the client think the operation succeeded. See the rationale in the
                # comments for request_token_inhibit_3pid_errors.
                # Also wait for some random amount of time between 100ms and 1s to make it
                # look like we did something.
                await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
                return 200, {"sid": random_string(16)}

            raise SynapseError(
                400, "Phone number is already in use", Codes.THREEPID_IN_USE
            )

        if not self.hs.config.account_threepid_delegate_msisdn:
            logger.warning(
                "No upstream msisdn account_threepid_delegate configured on the server to "
                "handle this request"
            )
            raise SynapseError(
                400, "Registration by phone number is not supported on this homeserver"
            )

        ret = await self.identity_handler.requestMsisdnToken(
            self.hs.config.account_threepid_delegate_msisdn,
            country,
            phone_number,
            client_secret,
            send_attempt,
            next_link,
        )

        # metrics: record token-send attempts per medium/reason
        threepid_send_requests.labels(type="msisdn", reason="register").observe(
            send_attempt
        )

        return 200, ret
class RegistrationSubmitTokenServlet(RestServlet):
    """Handles registration 3PID validation token submission"""

    PATTERNS = client_patterns(
        "/registration/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True
    )

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.config = hs.config
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()

        # The failure template is only needed when this server handles
        # validation emails itself (LOCAL mode).
        if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
            self._failure_email_template = (
                self.config.email_registration_template_failure_html
            )

    async def on_GET(self, request, medium):
        # Only email validation links are submitted back to the homeserver.
        if medium != "email":
            raise SynapseError(
                400, "This medium is currently not supported for registration"
            )
        if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
            if self.config.local_threepid_handling_disabled_due_to_email_config:
                logger.warning(
                    "User registration via email has been disabled due to lack of email config"
                )
            raise SynapseError(
                400, "Email-based registration is disabled on this server"
            )

        sid = parse_string(request, "sid", required=True)
        client_secret = parse_string(request, "client_secret", required=True)
        assert_valid_client_secret(client_secret)
        token = parse_string(request, "token", required=True)

        # Attempt to validate a 3PID session
        try:
            # Mark the session as valid
            next_link = await self.store.validate_threepid_session(
                sid, client_secret, token, self.clock.time_msec()
            )

            # Perform a 302 redirect if next_link is set
            if next_link:
                if next_link.startswith("file:///"):
                    # Redirecting a browser to a local file could be abused;
                    # fall through and show the success page instead.
                    logger.warning(
                        "Not redirecting to next_link as it is a local file: address"
                    )
                else:
                    request.setResponseCode(302)
                    request.setHeader("Location", next_link)
                    finish_request(request)
                    return None

            # Otherwise show the success template
            html = self.config.email_registration_template_success_html_content
            status_code = 200
        except ThreepidValidationError as e:
            status_code = e.code

            # Show a failure page with a reason
            template_vars = {"failure_reason": e.msg}
            html = self._failure_email_template.render(**template_vars)

        respond_with_html(request, status_code, html)
class UsernameAvailabilityRestServlet(RestServlet):
    """Reports whether a requested localpart is still available for registration."""
    PATTERNS = client_patterns("/register/available")
    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.hs = hs
        self.registration_handler = hs.get_registration_handler()
        # Rate-limit availability checks per client IP: within a 2s window,
        # allow one concurrent request, artificially delay a second one, and
        # reject (429) anything beyond one queued request.
        rate_limit_config = FederationRateLimitConfig(
            window_size=2000,
            sleep_limit=1,
            sleep_msec=1000,
            reject_limit=1,
            concurrent_requests=1,
        )
        self.ratelimiter = FederationRateLimiter(hs.get_clock(), rate_limit_config)
    async def on_GET(self, request):
        if not self.hs.config.enable_registration:
            raise SynapseError(
                403, "Registration has been disabled", errcode=Codes.FORBIDDEN
            )
        client_ip = request.getClientIP()
        with self.ratelimiter.ratelimit(client_ip) as wait_deferred:
            await wait_deferred
            requested_username = parse_string(request, "username", required=True)
            # Raises if the username is invalid or already taken.
            await self.registration_handler.check_username(requested_username)
            return 200, {"available": True}
class RegisterRestServlet(RestServlet):
    """Handles POST /register for guests, appservices and normal users."""
    PATTERNS = client_patterns("/register$")
    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.auth_handler = hs.get_auth_handler()
        self.registration_handler = hs.get_registration_handler()
        self.identity_handler = hs.get_identity_handler()
        self.room_member_handler = hs.get_room_member_handler()
        self.macaroon_gen = hs.get_macaroon_generator()
        self.ratelimiter = hs.get_registration_ratelimiter()
        self.password_policy_handler = hs.get_password_policy_handler()
        self.clock = hs.get_clock()
        self._registration_enabled = self.hs.config.enable_registration
        # Pre-compute the user-interactive auth flows supported by this
        # server's configuration (email/msisdn/recaptcha/terms/dummy).
        self._registration_flows = _calculate_registration_flows(
            hs.config, self.auth_handler
        )
    @interactive_auth_handler
    async def on_POST(self, request):
        """Register a new user.

        Dispatches to guest or appservice registration when requested;
        otherwise runs the user-interactive auth flow and registers a
        regular account.

        Returns:
            (200, dict) registration details on success.

        Raises:
            SynapseError / UnrecognizedRequestError on invalid requests,
            InteractiveAuthIncompleteError while UI auth is in progress.
        """
        body = parse_json_object_from_request(request)
        client_addr = request.getClientIP()
        await self.ratelimiter.ratelimit(None, client_addr, update=False)
        kind = b"user"
        if b"kind" in request.args:
            kind = request.args[b"kind"][0]
        if kind == b"guest":
            ret = await self._do_guest_registration(body, address=client_addr)
            return ret
        elif kind != b"user":
            raise UnrecognizedRequestError(
                "Do not understand membership kind: %s" % (kind.decode("utf8"),)
            )
        # Pull out the provided username and do basic sanity checks early since
        # the auth layer will store these in sessions.
        desired_username = None
        if "username" in body:
            if not isinstance(body["username"], str) or len(body["username"]) > 512:
                raise SynapseError(400, "Invalid username")
            desired_username = body["username"]
        # fork off as soon as possible for ASes which have completely
        # different registration flows to normal users
        # == Application Service Registration ==
        if body.get("type") == APP_SERVICE_REGISTRATION_TYPE:
            if not self.auth.has_access_token(request):
                raise SynapseError(
                    400,
                    "Appservice token must be provided when using a type of m.login.application_service",
                )
            # Verify the AS
            self.auth.get_appservice_by_req(request)
            # Set the desired user according to the AS API (which uses the
            # 'user' key not 'username'). Since this is a new addition, we'll
            # fallback to 'username' if they gave one.
            desired_username = body.get("user", desired_username)
            # XXX we should check that desired_username is valid. Currently
            # we give appservices carte blanche for any insanity in mxids,
            # because the IRC bridges rely on being able to register stupid
            # IDs.
            access_token = self.auth.get_access_token_from_request(request)
            if not isinstance(desired_username, str):
                raise SynapseError(400, "Desired Username is missing or not a string")
            result = await self._do_appservice_registration(
                desired_username, access_token, body
            )
            return 200, result
        elif self.auth.has_access_token(request):
            raise SynapseError(
                400,
                "An access token should not be provided on requests to /register (except if type is m.login.application_service)",
            )
        # == Normal User Registration == (everyone else)
        if not self._registration_enabled:
            raise SynapseError(403, "Registration has been disabled", Codes.FORBIDDEN)
        # For regular registration, convert the provided username to lowercase
        # before attempting to register it. This should mean that people who try
        # to register with upper-case in their usernames don't get a nasty surprise.
        #
        # Note that we treat usernames case-insensitively in login, so they are
        # free to carry on imagining that their username is CrAzYh4cKeR if that
        # keeps them happy.
        if desired_username is not None:
            desired_username = desired_username.lower()
        # Check if this account is upgrading from a guest account.
        guest_access_token = body.get("guest_access_token", None)
        # Pull out the provided password and do basic sanity checks early.
        #
        # Note that we remove the password from the body since the auth layer
        # will store the body in the session and we don't want a plaintext
        # password store there.
        password = body.pop("password", None)
        if password is not None:
            if not isinstance(password, str) or len(password) > 512:
                raise SynapseError(400, "Invalid password")
            self.password_policy_handler.validate_password(password)
        if "initial_device_display_name" in body and password is None:
            # ignore 'initial_device_display_name' if sent without
            # a password to work around a client bug where it sent
            # the 'initial_device_display_name' param alone, wiping out
            # the original registration params
            logger.warning("Ignoring initial_device_display_name without password")
            del body["initial_device_display_name"]
        session_id = self.auth_handler.get_session_id(body)
        registered_user_id = None
        password_hash = None
        if session_id:
            # if we get a registered user id out of here, it means we previously
            # registered a user for this session, so we could just return the
            # user here. We carry on and go through the auth checks though,
            # for paranoia.
            registered_user_id = await self.auth_handler.get_session_data(
                session_id, UIAuthSessionDataConstants.REGISTERED_USER_ID, None
            )
            # Extract the previously-hashed password from the session.
            password_hash = await self.auth_handler.get_session_data(
                session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None
            )
        # Ensure that the username is valid.
        if desired_username is not None:
            await self.registration_handler.check_username(
                desired_username,
                guest_access_token=guest_access_token,
                assigned_user_id=registered_user_id,
            )
        # Check if the user-interactive authentication flows are complete, if
        # not this will raise a user-interactive auth error.
        try:
            auth_result, params, session_id = await self.auth_handler.check_ui_auth(
                self._registration_flows,
                request,
                body,
                "register a new account",
            )
        except InteractiveAuthIncompleteError as e:
            # The user needs to provide more steps to complete auth.
            #
            # Hash the password and store it with the session since the client
            # is not required to provide the password again.
            #
            # If a password hash was previously stored we will not attempt to
            # re-hash and store it for efficiency. This assumes the password
            # does not change throughout the authentication flow, but this
            # should be fine since the data is meant to be consistent.
            if not password_hash and password:
                password_hash = await self.auth_handler.hash(password)
                await self.auth_handler.set_session_data(
                    e.session_id,
                    UIAuthSessionDataConstants.PASSWORD_HASH,
                    password_hash,
                )
            raise
        # Check that we're not trying to register a denied 3pid.
        #
        # the user-facing checks will probably already have happened in
        # /register/email/requestToken when we requested a 3pid, but that's not
        # guaranteed.
        if auth_result:
            for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]:
                if login_type in auth_result:
                    medium = auth_result[login_type]["medium"]
                    address = auth_result[login_type]["address"]
                    if not check_3pid_allowed(self.hs, medium, address):
                        raise SynapseError(
                            403,
                            "Third party identifiers (email/phone numbers)"
                            + " are not authorized on this server",
                            Codes.THREEPID_DENIED,
                        )
        if registered_user_id is not None:
            logger.info(
                "Already registered user ID %r for this session", registered_user_id
            )
            # don't re-register the threepids
            registered = False
        else:
            # If we have a password in this request, prefer it. Otherwise, there
            # might be a password hash from an earlier request.
            if password:
                password_hash = await self.auth_handler.hash(password)
            if not password_hash:
                raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
            desired_username = params.get("username", None)
            guest_access_token = params.get("guest_access_token", None)
            if desired_username is not None:
                desired_username = desired_username.lower()
            threepid = None
            if auth_result:
                threepid = auth_result.get(LoginType.EMAIL_IDENTITY)
                # Also check that we're not trying to register a 3pid that's already
                # been registered.
                #
                # This has probably happened in /register/email/requestToken as well,
                # but if a user hits this endpoint twice then clicks on each link from
                # the two activation emails, they would register the same 3pid twice.
                for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]:
                    if login_type in auth_result:
                        medium = auth_result[login_type]["medium"]
                        address = auth_result[login_type]["address"]
                        # For emails, canonicalise the address.
                        # We store all email addresses canonicalised in the DB.
                        # (See on_POST in EmailThreepidRequestTokenRestServlet
                        # in synapse/rest/client/v2_alpha/account.py)
                        if medium == "email":
                            try:
                                address = canonicalise_email(address)
                            except ValueError as e:
                                raise SynapseError(400, str(e))
                        existing_user_id = await self.store.get_user_id_by_threepid(
                            medium, address
                        )
                        if existing_user_id is not None:
                            raise SynapseError(
                                400,
                                "%s is already in use" % medium,
                                Codes.THREEPID_IN_USE,
                            )
            # Record the user agents/IPs that took part in the UI auth session
            # against the new account (used for abuse tracking).
            entries = await self.store.get_user_agents_ips_to_ui_auth_session(
                session_id
            )
            registered_user_id = await self.registration_handler.register_user(
                localpart=desired_username,
                password_hash=password_hash,
                guest_access_token=guest_access_token,
                threepid=threepid,
                address=client_addr,
                user_agent_ips=entries,
            )
            # Necessary due to auth checks prior to the threepid being
            # written to the db
            if threepid:
                if is_threepid_reserved(
                    self.hs.config.mau_limits_reserved_threepids, threepid
                ):
                    await self.store.upsert_monthly_active_user(registered_user_id)
            # Remember that the user account has been registered (and the user
            # ID it was registered with, since it might not have been specified).
            await self.auth_handler.set_session_data(
                session_id,
                UIAuthSessionDataConstants.REGISTERED_USER_ID,
                registered_user_id,
            )
            registered = True
        return_dict = await self._create_registration_details(
            registered_user_id, params
        )
        if registered:
            await self.registration_handler.post_registration_actions(
                user_id=registered_user_id,
                auth_result=auth_result,
                access_token=return_dict.get("access_token"),
            )
        return 200, return_dict
    async def _do_appservice_registration(self, username, as_token, body):
        """Register a user on behalf of an application service.

        Args:
            username (str): desired localpart (AS-controlled, unvalidated).
            as_token (str): the appservice's access token.
            body (dict): original request body (used for device params).
        """
        user_id = await self.registration_handler.appservice_register(
            username, as_token
        )
        return await self._create_registration_details(
            user_id,
            body,
            is_appservice_ghost=True,
        )
    async def _create_registration_details(
        self, user_id, params, is_appservice_ghost=False
    ):
        """Complete registration of newly-registered user
        Allocates device_id if one was not given; also creates access_token
        (unless the client set `inhibit_login`).
        Args:
            (str) user_id: full canonical @user:id
            (object) params: registration parameters, from which we pull
                device_id, initial_device_name and inhibit_login
            (bool) is_appservice_ghost: whether the user was registered by an
                appservice
        Returns:
            dictionary for response from /register
        """
        result = {"user_id": user_id, "home_server": self.hs.hostname}
        if not params.get("inhibit_login", False):
            device_id = params.get("device_id")
            initial_display_name = params.get("initial_device_display_name")
            device_id, access_token = await self.registration_handler.register_device(
                user_id,
                device_id,
                initial_display_name,
                is_guest=False,
                is_appservice_ghost=is_appservice_ghost,
            )
            result.update({"access_token": access_token, "device_id": device_id})
        return result
    async def _do_guest_registration(self, params, address=None):
        """Register a transient guest account.

        Args:
            params (dict): request body (only initial_device_display_name is read).
            address (str|None): client IP, recorded against the registration.
        """
        if not self.hs.config.allow_guest_access:
            raise SynapseError(403, "Guest access is disabled")
        user_id = await self.registration_handler.register_user(
            make_guest=True, address=address
        )
        # we don't allow guests to specify their own device_id, because
        # we have nowhere to store it.
        device_id = synapse.api.auth.GUEST_DEVICE_ID
        initial_display_name = params.get("initial_device_display_name")
        device_id, access_token = await self.registration_handler.register_device(
            user_id, device_id, initial_display_name, is_guest=True
        )
        return (
            200,
            {
                "user_id": user_id,
                "device_id": device_id,
                "access_token": access_token,
                "home_server": self.hs.hostname,
            },
        )
def _calculate_registration_flows(
    # technically `config` has to provide *all* of these interfaces, not just one
    config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig],
    auth_handler: AuthHandler,
) -> List[List[str]]:
    """Get a suitable flows list for registration

    Args:
        config: server configuration
        auth_handler: authorization handler

    Returns: a list of supported flows
    """
    # FIXME: need a better error than "no auth flow found" for scenarios
    # where we required 3PID for registration but the user didn't give one
    required_3pids = config.registrations_require_3pid
    require_email = "email" in required_3pids
    require_msisdn = "msisdn" in required_3pids
    show_email = True
    show_msisdn = not config.disable_msisdn_registration
    if not show_msisdn:
        # msisdn registration is switched off entirely, so it can't be required
        require_msisdn = False
    enabled_auth_types = auth_handler.get_enabled_auth_types()
    if LoginType.EMAIL_IDENTITY not in enabled_auth_types:
        show_email = False
        if require_email:
            raise ConfigError(
                "Configuration requires email address at registration, but email "
                "validation is not configured"
            )
    if LoginType.MSISDN not in enabled_auth_types:
        show_msisdn = False
        if require_msisdn:
            raise ConfigError(
                "Configuration requires msisdn at registration, but msisdn "
                "validation is not configured"
            )
    flows: List[List[str]] = []
    if not (require_email or require_msisdn):
        # only support 3PIDless registration if no 3PIDs are required.
        # Add a dummy step here, otherwise if a client completes
        # recaptcha first we'll assume they were going for this flow
        # and complete the request, when they could have been trying to
        # complete one of the flows with email/msisdn auth.
        flows.append([LoginType.DUMMY])
    if show_email and not require_msisdn:
        # only support the email-only flow if we don't require MSISDN 3PIDs
        flows.append([LoginType.EMAIL_IDENTITY])
    if show_msisdn and not require_email:
        # only support the MSISDN-only flow if we don't require email 3PIDs
        flows.append([LoginType.MSISDN])
    if show_email and show_msisdn:
        # always let users provide both MSISDN & email
        flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY])
    if config.user_consent_at_registration:
        # Prepend m.login.terms to all flows if we're requiring consent
        for flow in flows:
            flow.insert(0, LoginType.TERMS)
    if config.enable_registration_captcha:
        # Prepend recaptcha to all flows if we're requiring captcha
        for flow in flows:
            flow.insert(0, LoginType.RECAPTCHA)
    return flows
def register_servlets(hs, http_server):
    """Instantiate every registration servlet and attach it to the HTTP server."""
    servlet_classes = (
        EmailRegisterRequestTokenRestServlet,
        MsisdnRegisterRequestTokenRestServlet,
        UsernameAvailabilityRestServlet,
        RegistrationSubmitTokenServlet,
        RegisterRestServlet,
    )
    for servlet_class in servlet_classes:
        servlet_class(hs).register(http_server)
| 39.425455
| 130
| 0.623778
|
4a0648be0dd3b82baa0eb3e6de8398286399f07d
| 2,338
|
py
|
Python
|
rlpyt/experiments/scripts/procgen/pg/launch/launch_procgen_ff_ppo_gpu.py
|
DavidSlayback/rlpyt
|
445adbd3917842caae0cae0d06e4b2866c8f1258
|
[
"MIT"
] | null | null | null |
rlpyt/experiments/scripts/procgen/pg/launch/launch_procgen_ff_ppo_gpu.py
|
DavidSlayback/rlpyt
|
445adbd3917842caae0cae0d06e4b2866c8f1258
|
[
"MIT"
] | null | null | null |
rlpyt/experiments/scripts/procgen/pg/launch/launch_procgen_ff_ppo_gpu.py
|
DavidSlayback/rlpyt
|
445adbd3917842caae0cae0d06e4b2866c8f1258
|
[
"MIT"
] | null | null | null |
import pathlib
from rlpyt.utils.launching.affinity import encode_affinity
from rlpyt.utils.launching.exp_launcher import run_experiments
from rlpyt.utils.launching.variant import make_variants, VariantLevel
# Hardware affinity for the launcher: 6 CPU cores, 1 GPU with a single
# context, one CPU socket, alternating sampler.
affinity_code = encode_affinity(
    n_cpu_core=6,
    n_gpu=1,
    contexts_per_gpu=1,
    n_socket=1,
    alternating=True
)
runs_per_setting = 3 # 3 runs
# Paths to the training entry-point scripts (siblings under ../train/).
# NOTE(review): only path_ppo and path_ppoc are launched below; the a2c/a2oc
# paths appear unused here — presumably shared with sibling launch scripts.
path_a2c = (pathlib.Path(__file__).resolve().parent.parent / 'train' / "procgen_ff_a2c_gpu.py").as_posix()
path_ppo = (pathlib.Path(__file__).resolve().parent.parent / 'train' / "procgen_ff_ppo_gpu.py").as_posix()
path_a2oc = (pathlib.Path(__file__).resolve().parent.parent / 'train' / "procgen_ff_a2oc_gpu.py").as_posix()
path_ppoc = (pathlib.Path(__file__).resolve().parent.parent / 'train' / "procgen_ff_ppoc_gpu.py").as_posix()
# Default config keys passed to the train scripts as common_args.
default_key = 'base'
oc_key = 'base_4_oc'
int_key = 'base_interest'
# Param options (each zipped into 1-tuples, as make_variants expects).
# NOTE(review): the learning-rate/FC-size grids below are defined but not
# added as variant levels in this launcher — confirm before deleting.
A2C_LRS = list(zip([3e-4, 1e-3, 3e-3, 1e-2]))
NOC_FC_SIZES = list(zip([512, 2048]))
PPO_LRS = list(zip([3e-4, 1e-3, 3e-3]))
OC_DELIB = list(zip([0., 0.02]))
games = list(zip(['fruitbot', 'coinrun', 'caveflyer']))
# Variant keys: (config_section, config_name) paths into the train configs.
lr_key = [("algo", "learning_rate")]
delib_key = [("algo", "delib_cost")]
fc_key = [("model", "fc_sizes")]
interest_key = [("model", "use_interest")]
game_key = [("env", "game")]
# Human-readable directory names for each variant value.
game_names = ["{}".format(*v) for v in games]
delib_names = ["D_{}".format(*v) for v in OC_DELIB]
# PPO: one variant level (game), 3 runs per game.
experiment_title = "PPO_Procgen"
variant_levels = list()
variant_levels.append(VariantLevel(game_key, games, game_names)) # Games
variants, log_dirs = make_variants(*variant_levels)
run_experiments(
    script=path_ppo,
    affinity_code=affinity_code,
    experiment_title=experiment_title,
    runs_per_setting=runs_per_setting,
    variants=variants,
    log_dirs=log_dirs,
    common_args=(default_key,),
)
# PPOC: cross product of game x deliberation cost.
experiment_title = "PPOC_Procgen"
variant_levels = list()
variant_levels.append(VariantLevel(game_key, games, game_names)) # Games
variant_levels.append(VariantLevel(delib_key, OC_DELIB, delib_names)) # Deliberation cost
variants, log_dirs = make_variants(*variant_levels)
run_experiments(
    script=path_ppoc,
    affinity_code=affinity_code,
    experiment_title=experiment_title,
    runs_per_setting=runs_per_setting,
    variants=variants,
    log_dirs=log_dirs,
    common_args=(oc_key,),
)
| 33.884058
| 108
| 0.736527
|
4a0648f73f5c3ef42085c6167160b0aca60193b6
| 237
|
py
|
Python
|
pizzas/io_files/dataclass.py
|
corderop/hashcode-2022-traning
|
b62286eee0a477b6548591e4607c7eaacb00e0ec
|
[
"MIT"
] | null | null | null |
pizzas/io_files/dataclass.py
|
corderop/hashcode-2022-traning
|
b62286eee0a477b6548591e4607c7eaacb00e0ec
|
[
"MIT"
] | null | null | null |
pizzas/io_files/dataclass.py
|
corderop/hashcode-2022-traning
|
b62286eee0a477b6548591e4607c7eaacb00e0ec
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import List
@dataclass
class ProblemData:
    """Parsed problem input: slice budget and the available pizza types."""
    max_slices: int  # maximum total slices that may be ordered
    number_of_types: int  # number of pizza types available
    types: List[int]  # slice count of each pizza type
@dataclass
class SolutionData:
    """Solution output: which pizza types were chosen."""
    number_of_types: int  # how many types are in the order
    types: List[int]  # indices of the chosen pizza types
| 16.928571
| 33
| 0.734177
|
4a0649680caf310d741d016891ad65ae1b7f7e2e
| 1,213
|
py
|
Python
|
yt_dlp/postprocessor/execafterdownload.py
|
CXwudi/yt-dlp
|
d2558234cf5dd12d6896eed5427b7dcdb3ab7b5a
|
[
"Unlicense"
] | null | null | null |
yt_dlp/postprocessor/execafterdownload.py
|
CXwudi/yt-dlp
|
d2558234cf5dd12d6896eed5427b7dcdb3ab7b5a
|
[
"Unlicense"
] | null | null | null |
yt_dlp/postprocessor/execafterdownload.py
|
CXwudi/yt-dlp
|
d2558234cf5dd12d6896eed5427b7dcdb3ab7b5a
|
[
"Unlicense"
] | null | null | null |
from __future__ import unicode_literals
import re
import subprocess
from .common import PostProcessor
from ..compat import compat_shlex_quote
from ..utils import (
encodeArgument,
FORMAT_RE,
PostProcessingError,
)
class ExecAfterDownloadPP(PostProcessor):
    """Runs a user-supplied shell command after each download finishes."""
    def __init__(self, downloader, exec_cmd):
        super(ExecAfterDownloadPP, self).__init__(downloader)
        self.exec_cmd = exec_cmd
    @classmethod
    def pp_key(cls):
        return 'Exec'
    def parse_cmd(self, cmd, info):
        """Expand the command template against the download's info dict."""
        has_outtmpl_field = re.search(FORMAT_RE.format(r'[-\w>.+]+'), cmd)
        if has_outtmpl_field:
            tmpl, info_copy = self._downloader.prepare_outtmpl(cmd, info)
            return tmpl % info_copy
        # No %(key)s fields: fall back to the legacy '{}' placeholder for
        # backward compatibility, appending it if absent.
        if '{}' not in cmd:
            cmd += ' {}'
        return cmd.replace('{}', compat_shlex_quote(info['filepath']))
    def run(self, info):
        command = self.parse_cmd(self.exec_cmd, info)
        self.to_screen('Executing command: %s' % command)
        return_code = subprocess.call(encodeArgument(command), shell=True)
        if return_code:
            raise PostProcessingError('Command returned error code %d' % return_code)
        return [], info
| 28.880952
| 81
| 0.643034
|
4a0649a982f94c9caa317b3d5c521255d6c47ef2
| 33,231
|
py
|
Python
|
haystack/document_store/weaviate.py
|
srevinsaju/haystack
|
e857233313569c89bb9b12b02e672c2eda452716
|
[
"Apache-2.0"
] | 9
|
2020-10-23T14:39:45.000Z
|
2021-11-16T10:37:11.000Z
|
haystack/document_store/weaviate.py
|
bluepeter/haystack
|
0168f043850f02bdfd00bebc36b0b05ae625aae1
|
[
"Apache-2.0"
] | 12
|
2020-10-07T08:07:51.000Z
|
2020-10-22T14:20:19.000Z
|
haystack/document_store/weaviate.py
|
bluepeter/haystack
|
0168f043850f02bdfd00bebc36b0b05ae625aae1
|
[
"Apache-2.0"
] | 1
|
2021-06-10T13:54:44.000Z
|
2021-06-10T13:54:44.000Z
|
import logging
from typing import Any, Dict, Generator, List, Optional, Union
import numpy as np
from tqdm import tqdm
from haystack import Document
from haystack.document_store.base import BaseDocumentStore
from haystack.utils import get_batches_from_generator
from weaviate import client, auth, AuthClientPassword
from weaviate import ObjectsBatchRequest
logger = logging.getLogger(__name__)
class WeaviateDocumentStore(BaseDocumentStore):
"""
Weaviate is a cloud-native, modular, real-time vector search engine built to scale your machine learning models.
(See https://www.semi.technology/developers/weaviate/current/index.html#what-is-weaviate)
Some of the key differences in contrast to FAISS & Milvus:
1. Stores everything in one place: documents, meta data and vectors - so less network overhead when scaling this up
2. Allows combination of vector search and scalar filtering, i.e. you can filter for a certain tag and do dense retrieval on that subset
3. Has less variety of ANN algorithms, as of now only HNSW.
Weaviate python client is used to connect to the server, more details are here
https://weaviate-python-client.readthedocs.io/en/docs/weaviate.html
Usage:
1. Start a Weaviate server (see https://www.semi.technology/developers/weaviate/current/getting-started/installation.html)
2. Init a WeaviateDocumentStore in Haystack
"""
    def __init__(
        self,
        host: Union[str, List[str]] = "http://localhost",
        port: Union[int, List[int]] = 8080,
        timeout_config: tuple = (5, 15),
        username: str = None,
        password: str = None,
        index: str = "Document",
        embedding_dim: int = 768,
        text_field: str = "text",
        name_field: str = "name",
        faq_question_field = "question",
        similarity: str = "dot_product",
        index_type: str = "hnsw",
        custom_schema: Optional[dict] = None,
        return_embedding: bool = False,
        embedding_field: str = "embedding",
        progress_bar: bool = True,
        duplicate_documents: str = 'overwrite',
        **kwargs,
    ):
        """
        :param host: Weaviate server connection URL for storing and processing documents and vectors.
                             For more details, refer "https://www.semi.technology/developers/weaviate/current/getting-started/installation.html"
        :param port: port of Weaviate instance
        :param timeout_config: Weaviate Timeout config as a tuple of (retries, time out seconds).
        :param username: username (standard authentication via http_auth)
        :param password: password (standard authentication via http_auth)
        :param index: Index name for document text, embedding and metadata (in Weaviate terminology, this is a "Class" in Weaviate schema).
        :param embedding_dim: The embedding vector size. Default: 768.
        :param text_field: Name of field that might contain the answer and will therefore be passed to the Reader Model (e.g. "full_text").
                           If no Reader is used (e.g. in FAQ-Style QA) the plain content of this field will just be returned.
        :param name_field: Name of field that contains the title of the the doc
        :param faq_question_field: Name of field containing the question in case of FAQ-Style QA
        :param similarity: The similarity function used to compare document vectors. 'dot_product' is the default.
        :param index_type: Index type of any vector object defined in weaviate schema. The vector index type is pluggable.
                           Currently, HSNW is only supported.
                           See: https://www.semi.technology/developers/weaviate/current/more-resources/performance.html
        :param custom_schema: Allows to create custom schema in Weaviate, for more details
                           See https://www.semi.technology/developers/weaviate/current/data-schema/schema-configuration.html
        :param return_embedding: To return document embedding.
        :param embedding_field: Name of field containing an embedding vector.
        :param progress_bar: Whether to show a tqdm progress bar or not.
                             Can be helpful to disable in production deployments to keep the logs clean.
        :param duplicate_documents: Handle duplicates document based on parameter options.
                                    Parameter options : ( 'skip','overwrite','fail')
                                    skip: Ignore the duplicates documents
                                    overwrite: Update any existing documents with the same ID when adding documents.
                                    fail: an error is raised if the document ID of the document being added already exists.
        """
        # save init parameters to enable export of component config as YAML
        self.set_config(
            host=host, port=port, timeout_config=timeout_config, username=username, password=password,
            index=index, embedding_dim=embedding_dim, text_field=text_field, name_field=name_field,
            faq_question_field=faq_question_field, similarity=similarity, index_type=index_type,
            custom_schema=custom_schema,return_embedding=return_embedding, embedding_field=embedding_field,
            progress_bar=progress_bar, duplicate_documents=duplicate_documents
        )
        # Connect to Weaviate server using python binding
        weaviate_url =f"{host}:{port}"
        if username and password:
            # Standard username/password authentication against the server.
            secret = AuthClientPassword(username, password)
            self.weaviate_client = client.Client(url=weaviate_url,
                                               auth_client_secret=secret,
                                               timeout_config=timeout_config)
        else:
            self.weaviate_client = client.Client(url=weaviate_url,
                                                 timeout_config=timeout_config)
        # Test Weaviate connection
        try:
            status = self.weaviate_client.is_ready()
            if not status:
                raise ConnectionError(
                    f"Initial connection to Weaviate failed. Make sure you run Weaviate instance "
                    f"at `{weaviate_url}` and that it has finished the initial ramp up (can take > 30s)."
                )
        except Exception:
            raise ConnectionError(
                f"Initial connection to Weaviate failed. Make sure you run Weaviate instance "
                f"at `{weaviate_url}` and that it has finished the initial ramp up (can take > 30s)."
            )
        self.index = index
        self.embedding_dim = embedding_dim
        self.text_field = text_field
        self.name_field = name_field
        self.faq_question_field = faq_question_field
        self.similarity = similarity
        self.index_type = index_type
        self.custom_schema = custom_schema
        self.return_embedding = return_embedding
        self.embedding_field = embedding_field
        self.progress_bar = progress_bar
        self.duplicate_documents = duplicate_documents
        # Make sure the backing Weaviate class exists before any reads/writes.
        self._create_schema_and_index_if_not_exist(self.index)
def _create_schema_and_index_if_not_exist(
self,
index: Optional[str] = None,
):
"""Create a new index (schema/class in Weaviate) for storing documents in case if an index (schema) with the name doesn't exist already."""
index = index or self.index
if self.custom_schema:
schema = self.custom_schema
else:
schema = {
"classes": [
{
"class": index,
"description": "Haystack index, it's a class in Weaviate",
"invertedIndexConfig": {
"cleanupIntervalSeconds": 60
},
"vectorizer": "none",
"properties": [
{
"dataType": [
"string"
],
"description": "Name Field",
"name": self.name_field
},
{
"dataType": [
"string"
],
"description": "Question Field",
"name": self.faq_question_field
},
{
"dataType": [
"text"
],
"description": "Document Text",
"name": self.text_field
},
],
}
]
}
if not self.weaviate_client.schema.contains(schema):
self.weaviate_client.schema.create(schema)
def _convert_weaviate_result_to_document(
self,
result: dict,
return_embedding: bool
) -> Document:
"""
Convert weaviate result dict into haystack document object. This is more involved because
weaviate search result dict varies between get and query interfaces.
Weaviate get methods return the data items in properties key, whereas the query doesn't.
"""
score = None
probability = None
text = ""
question = None
id = result.get("id")
embedding = result.get("vector")
# If properties key is present, get all the document fields from it.
# otherwise, a direct lookup in result root dict
props = result.get("properties")
if not props:
props = result
if props.get(self.text_field) is not None:
text = str(props.get(self.text_field))
if props.get(self.faq_question_field) is not None:
question = props.get(self.faq_question_field)
# Weaviate creates "_additional" key for semantic search
if "_additional" in props:
if "certainty" in props["_additional"]:
score = props["_additional"]['certainty']
probability = score
if "id" in props["_additional"]:
id = props["_additional"]['id']
if "vector" in props["_additional"]:
embedding = props["_additional"]['vector']
props.pop("_additional", None)
# We put all additional data of the doc into meta_data and return it in the API
meta_data = {k:v for k,v in props.items() if k not in (self.text_field, self.faq_question_field, self.embedding_field)}
if return_embedding and embedding:
embedding = np.asarray(embedding, dtype=np.float32)
document = Document(
id=id,
text=text,
meta=meta_data,
score=score,
probability=probability,
question=question,
embedding=embedding,
)
return document
def _create_document_field_map(self) -> Dict:
return {
self.text_field: "text",
self.embedding_field: "embedding",
self.faq_question_field if self.faq_question_field else "question": "question"
}
def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]:
"""Fetch a document by specifying its text id string"""
# Sample result dict from a get method
'''{'class': 'Document',
'creationTimeUnix': 1621075584724,
'id': '1bad51b7-bd77-485d-8871-21c50fab248f',
'properties': {'meta': "{'key1':'value1'}",
'name': 'name_5',
'text': 'text_5'},
'vector': []}'''
index = index or self.index
document = None
result = self.weaviate_client.data_object.get_by_id(id, with_vector=True)
if result:
document = self._convert_weaviate_result_to_document(result, return_embedding=True)
return document
def get_documents_by_id(self, ids: List[str], index: Optional[str] = None,
batch_size: int = 10_000) -> List[Document]:
"""Fetch documents by specifying a list of text id strings"""
index = index or self.index
documents = []
#TODO: better implementation with multiple where filters instead of chatty call below?
for id in ids:
result = self.weaviate_client.data_object.get_by_id(id, with_vector=True)
if result:
document = self._convert_weaviate_result_to_document(result, return_embedding=True)
documents.append(document)
return documents
def _get_current_properties(self, index: Optional[str] = None) -> List[str]:
"""Get all the existing properties in the schema"""
index = index or self.index
cur_properties = []
for class_item in self.weaviate_client.schema.get()['classes']:
if class_item['class'] == index:
cur_properties = [item['name'] for item in class_item['properties']]
return cur_properties
def _build_filter_clause(self, filters:Dict[str, List[str]]) -> dict:
"""Transform Haystack filter conditions to Weaviate where filter clauses"""
weaviate_filters = []
weaviate_filter = {}
for key, values in filters.items():
for value in values:
weaviate_filter = {
"path": [key],
"operator": "Equal",
"valueString": value
}
weaviate_filters.append(weaviate_filter)
if len(weaviate_filters) > 1:
filter_dict = {
"operator": "Or",
"operands": weaviate_filters
}
return filter_dict
else:
return weaviate_filter
def _update_schema(self, new_prop:str, index: Optional[str] = None):
"""Updates the schema with a new property"""
index = index or self.index
property_dict = {
"dataType": [
"string"
],
"description": f"dynamic property {new_prop}",
"name": new_prop
}
self.weaviate_client.schema.property.create(index, property_dict)
def _check_document(self, cur_props: List[str], doc: dict) -> List[str]:
"""Find the properties in the document that don't exist in the existing schema"""
return [item for item in doc.keys() if item not in cur_props]
    def write_documents(
        self, documents: Union[List[dict], List[Document]], index: Optional[str] = None,
        batch_size: int = 10_000, duplicate_documents: Optional[str] = None):
        """
        Add new documents to the DocumentStore.

        :param documents: List of `Dicts` or List of `Documents`. Passing an Embedding/Vector is mandatory in case weaviate is not
                          configured with a module. If a module is configured, the embedding is automatically generated by Weaviate.
        :param index: index name for storing the docs and metadata
        :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
        :param duplicate_documents: Handle duplicates document based on parameter options.
                                    Parameter options : ( 'skip','overwrite','fail')
                                    skip: Ignore the duplicates documents
                                    overwrite: Update any existing documents with the same ID when adding documents.
                                    fail: an error is raised if the document ID of the document being added already
                                    exists.
        :raises DuplicateDocumentError: Exception trigger on duplicate document
        :return: None
        """
        index = index or self.index
        self._create_schema_and_index_if_not_exist(index)
        field_map = self._create_document_field_map()
        duplicate_documents = duplicate_documents or self.duplicate_documents
        assert duplicate_documents in self.duplicate_documents_options, \
            f"duplicate_documents parameter must be {', '.join(self.duplicate_documents_options)}"
        if len(documents) == 0:
            logger.warning("Calling DocumentStore.write_documents() with empty list")
            return
        # Auto schema feature https://github.com/semi-technologies/weaviate/issues/1539
        # Get and cache current properties in the schema
        current_properties = self._get_current_properties(index)
        document_objects = [Document.from_dict(d, field_map=field_map) if isinstance(d, dict) else d for d in documents]
        document_objects = self._handle_duplicate_documents(document_objects, duplicate_documents)
        batched_documents = get_batches_from_generator(document_objects, batch_size)
        with tqdm(total=len(document_objects), disable=not self.progress_bar) as progress_bar:
            for document_batch in batched_documents:
                docs_batch = ObjectsBatchRequest()
                for idx, doc in enumerate(document_batch):
                    _doc = {
                        **doc.to_dict(field_map=self._create_document_field_map())
                    }
                    # Retrieval-time fields must not be persisted as document properties.
                    _ = _doc.pop("score", None)
                    _ = _doc.pop("probability", None)
                    # In order to have a flat structure in elastic + similar behaviour to the other DocumentStores,
                    # we "unnest" all value within "meta"
                    if "meta" in _doc.keys():
                        for k, v in _doc["meta"].items():
                            _doc[k] = v
                        _doc.pop("meta")
                    # id and vector are passed to Weaviate separately, not as properties.
                    doc_id = str(_doc.pop("id"))
                    vector = _doc.pop(self.embedding_field)
                    # NOTE(review): if self.faq_question_field is None this pops key None,
                    # which raises KeyError when absent — confirm faq_question_field is
                    # always a string here.
                    if _doc.get(self.faq_question_field) is None:
                        _doc.pop(self.faq_question_field)
                    # Check if additional properties are in the document, if so,
                    # append the schema with all the additional properties
                    missing_props = self._check_document(current_properties, _doc)
                    if missing_props:
                        for property in missing_props:
                            self._update_schema(property, index)
                            current_properties.append(property)
                    docs_batch.add(_doc, class_name=index, uuid=doc_id, vector=vector)
                # Ingest a batch of documents
                results = self.weaviate_client.batch.create(docs_batch)
                # Weaviate returns errors for every failed document in the batch
                if results is not None:
                    for result in results:
                        if 'result' in result and 'errors' in result['result'] \
                                and 'error' in result['result']['errors']:
                            for message in result['result']['errors']['error']:
                                logger.error(f"{message['message']}")
                # NOTE(review): updating by batch_size overshoots the bar on a final
                # partial batch — cosmetic only.
                progress_bar.update(batch_size)
            progress_bar.close()
    def update_document_meta(self, id: str, meta: Dict[str, str]):
        """
        Update the metadata dictionary of a document by specifying its string id
        """
        # Weaviate's update merges the given properties into the existing object;
        # the document's vector and other properties are left untouched.
        self.weaviate_client.data_object.update(meta, class_name=self.index, uuid=id)
def get_document_count(self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None) -> int:
"""
Return the number of documents in the document store.
"""
index = index or self.index
doc_count = 0
if filters:
filter_dict = self._build_filter_clause(filters=filters)
result = self.weaviate_client.query.aggregate(index) \
.with_fields("meta { count }") \
.with_where(filter_dict)\
.do()
else:
result = self.weaviate_client.query.aggregate(index)\
.with_fields("meta { count }")\
.do()
if "data" in result:
if "Aggregate" in result.get('data'):
doc_count = result.get('data').get('Aggregate').get(index)[0]['meta']['count']
return doc_count
def get_all_documents(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> List[Document]:
"""
Get documents from the document store.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
index = index or self.index
result = self.get_all_documents_generator(
index=index, filters=filters, return_embedding=return_embedding, batch_size=batch_size
)
documents = list(result)
return documents
def _get_all_documents_in_index(
self,
index: Optional[str],
filters: Optional[Dict[str, List[str]]] = None,
batch_size: int = 10_000,
only_documents_without_embedding: bool = False,
) -> Generator[dict, None, None]:
"""
Return all documents in a specific index in the document store
"""
index = index or self.index
# Build the properties to retrieve from Weaviate
properties = self._get_current_properties(index)
properties.append("_additional {id, certainty, vector}")
if filters:
filter_dict = self._build_filter_clause(filters=filters)
result = self.weaviate_client.query.get(class_name=index, properties=properties)\
.with_where(filter_dict)\
.do()
else:
result = self.weaviate_client.query.get(class_name=index, properties=properties)\
.do()
all_docs = {}
if result and "data" in result and "Get" in result.get("data"):
if result.get("data").get("Get").get(index):
all_docs = result.get("data").get("Get").get(index)
yield from all_docs
def get_all_documents_generator(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> Generator[Document, None, None]:
"""
Get documents from the document store. Under-the-hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
if index is None:
index = self.index
if return_embedding is None:
return_embedding = self.return_embedding
results = self._get_all_documents_in_index(index=index, filters=filters, batch_size=batch_size)
for result in results:
document = self._convert_weaviate_result_to_document(result, return_embedding=return_embedding)
yield document
def query(
self,
query: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
top_k: int = 10,
custom_query: Optional[str] = None,
index: Optional[str] = None,
) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query as defined by Weaviate semantic search.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param custom_query: Custom query that will executed using query.raw method, for more details refer
https://www.semi.technology/developers/weaviate/current/graphql-references/filters.html
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
index = index or self.index
# Build the properties to retrieve from Weaviate
properties = self._get_current_properties(index)
properties.append("_additional {id, certainty, vector}")
if custom_query:
query_output = self.weaviate_client.query.raw(custom_query)
elif filters:
filter_dict = self._build_filter_clause(filters)
query_output = self.weaviate_client.query\
.get(class_name=index, properties=properties)\
.with_where(filter_dict)\
.with_limit(top_k)\
.do()
else:
raise NotImplementedError("Weaviate does not support inverted index text query. However, "
"it allows to search by filters example : {'text': 'some text'} or "
"use a custom GraphQL query in text format!")
results = []
if query_output and "data" in query_output and "Get" in query_output.get("data"):
if query_output.get("data").get("Get").get(index):
results = query_output.get("data").get("Get").get(index)
documents = []
for result in results:
doc = self._convert_weaviate_result_to_document(result, return_embedding=True)
documents.append(doc)
return documents
def query_by_embedding(self,
query_emb: np.ndarray,
filters: Optional[dict] = None,
top_k: int = 10,
index: Optional[str] = None,
return_embedding: Optional[bool] = None) -> List[Document]:
"""
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.
:param query_emb: Embedding of the query (e.g. gathered from DPR)
:param filters: Optional filters to narrow down the search space.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param top_k: How many documents to return
:param index: index name for storing the docs and metadata
:param return_embedding: To return document embedding
:return:
"""
if return_embedding is None:
return_embedding = self.return_embedding
index = index or self.index
# Build the properties to retrieve from Weaviate
properties = self._get_current_properties(index)
properties.append("_additional {id, certainty, vector}")
query_emb = query_emb.reshape(1, -1).astype(np.float32)
query_string = {
"vector" : query_emb
}
if filters:
filter_dict = self._build_filter_clause(filters)
query_output = self.weaviate_client.query\
.get(class_name=index, properties=properties)\
.with_where(filter_dict)\
.with_near_vector(query_string)\
.with_limit(top_k)\
.do()
else:
query_output = self.weaviate_client.query\
.get(class_name=index, properties=properties)\
.with_near_vector(query_string)\
.with_limit(top_k)\
.do()
results = []
if query_output and "data" in query_output and "Get" in query_output.get("data"):
if query_output.get("data").get("Get").get(index):
results = query_output.get("data").get("Get").get(index)
documents = []
for result in results:
doc = self._convert_weaviate_result_to_document(result, return_embedding=return_embedding)
documents.append(doc)
return documents
    def update_embeddings(
        self,
        retriever,
        index: Optional[str] = None,
        filters: Optional[Dict[str, List[str]]] = None,
        update_existing_embeddings: bool = True,
        batch_size: int = 10_000
    ):
        """
        Updates the embeddings in the the document store using the encoding model specified in the retriever.
        This can be useful if want to change the embeddings for your documents (e.g. after changing the retriever config).

        :param retriever: Retriever to use to update the embeddings.
        :param index: Index name to update
        :param update_existing_embeddings: Weaviate mandates an embedding while creating the document itself.
                                           This option must be always true for weaviate and it will update the embeddings for all the documents.
        :param filters: Optional filters to narrow down the documents for which embeddings are to be updated.
                        Example: {"name": ["some", "more"], "category": ["only_one"]}
        :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
        :return: None
        """
        if index is None:
            index = self.index
        if not self.embedding_field:
            raise RuntimeError("Specify the arg `embedding_field` when initializing WeaviateDocumentStore()")
        if update_existing_embeddings:
            logger.info(f"Updating embeddings for all {self.get_document_count(index=index)} docs ...")
        else:
            # Every Weaviate object already carries a vector, so "only missing" updates
            # are meaningless for this store.
            raise RuntimeError("All the documents in Weaviate store have an embedding by default. Only update is allowed!")
        result = self._get_all_documents_in_index(
            index=index,
            filters=filters,
            batch_size=batch_size,
        )
        for result_batch in get_batches_from_generator(result, batch_size):
            document_batch = [self._convert_weaviate_result_to_document(hit, return_embedding=False) for hit in result_batch]
            embeddings = retriever.embed_passages(document_batch)  # type: ignore
            assert len(document_batch) == len(embeddings)
            # Guard against a retriever whose output dimension disagrees with the store.
            if embeddings[0].shape[0] != self.embedding_dim:
                raise RuntimeError(f"Embedding dim. of model ({embeddings[0].shape[0]})"
                                   f" doesn't match embedding dim. in DocumentStore ({self.embedding_dim})."
                                   "Specify the arg `embedding_dim` when initializing WeaviateDocumentStore()")
            for doc, emb in zip(document_batch, embeddings):
                # Using update method to only update the embeddings, other properties will be in tact
                self.weaviate_client.data_object.update({}, class_name=index, uuid=doc.id, vector=emb)
def delete_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None):
"""
Delete documents in an index. All documents are deleted if no filters are passed.
:param index: Index name to delete the document from.
:param filters: Optional filters to narrow down the documents to be deleted.
:return: None
"""
index = index or self.index
if filters:
docs_to_delete = self.get_all_documents(index, filters=filters)
for doc in docs_to_delete:
self.weaviate_client.data_object.delete(doc.id)
else:
self.weaviate_client.schema.delete_class(index)
self._create_schema_and_index_if_not_exist(index)
| 47.608883
| 147
| 0.598297
|
4a064b6623cb63f3ab3457f7b7c4383912d3bfdb
| 4,979
|
py
|
Python
|
venv/Lib/site-packages/music21/tie.py
|
alimirzazadeh/wolfGANg
|
5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c
|
[
"MIT"
] | 1
|
2022-01-28T00:03:19.000Z
|
2022-01-28T00:03:19.000Z
|
venv/Lib/site-packages/music21/tie.py
|
alimirzazadeh/wolfGANg
|
5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/music21/tie.py
|
alimirzazadeh/wolfGANg
|
5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c
|
[
"MIT"
] | 1
|
2021-11-23T00:49:26.000Z
|
2021-11-23T00:49:26.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: tie.py
# Purpose: music21 classes for representing ties (visual and conceptual)
#
# Authors: Michael Scott Cuthbert
# Christopher Ariza
#
# Copyright: Copyright © 2009-2010, 2012, 2015 Michael Scott Cuthbert and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
The `tie` module contains a single class, `Tie` that represents the visual and
conceptual idea of tied notes. They can be start or stop ties.
'''
import unittest
from music21 import exceptions21
from music21.common.objects import SlottedObjectMixin
from music21 import prebase
class TieException(exceptions21.Music21Exception):
    '''Raised for invalid Tie configurations, e.g. an unknown tie type.'''
    pass
# ------------------------------------------------------------------------------
class Tie(prebase.ProtoM21Object, SlottedObjectMixin):
    '''
    Object added to notes that are tied to other notes. The `type` value is one
    of start, stop, or continue.

    >>> note1 = note.Note()
    >>> note1.tie = tie.Tie('start')  # start, stop, or continue
    >>> note1.tie.style = 'normal'  # default; could also be 'dotted' or 'dashed' or 'hidden'
    >>> note1.tie.type
    'start'

    >>> note1.tie
    <music21.tie.Tie start>

    Generally Ties have a placement of None, but if they are defined
    as 'above' or 'below' this will be retained. (see:
    http://forums.makemusic.com/viewtopic.php?f=12&t=2179&start=0
    for how orientation and placement in musicxml are essentially the same
    content).

    >>> note1.tie.placement is None
    True

    Differences from MusicXML:

    * notes do not need to know if they are tied from a
      previous note. i.e., you can tie n1 to n2 just with
      a tie start on n1. However, if you want proper musicXML output
      you need a tie stop on n2.
    * one tie with "continue" implies tied from and tied to.

    The tie.style only applies to ties of type 'start' or 'continue' (and then
    only to the next part of the tie). For instance, if there are two
    tied notes, and the first note has a 'dotted'-start tie, and the
    second note has a 'dashed'-stop tie, the graphical tie itself will be dotted.

    A type of tie that is unknown raises a ValueError:

    >>> tie.Tie('hello')
    Traceback (most recent call last):
    music21.tie.TieException: Type must be one of
    ('start', 'stop', 'continue', 'let-ring', 'continue-let-ring'), not hello

    OMIT_FROM_DOCS

    optional (to know what notes are next:)
    .to = note()   # not implemented yet, b/c of garbage coll.
    .from = note()

    (question: should notes be able to be tied to multiple notes
    for the case where a single note is tied both voices of a
    two-note-head unison?)
    '''

    # CLASS VARIABLES #

    __slots__ = (
        'id',
        'placement',
        'style',
        'type',
    )

    _DOC_ATTR = {
        'type': '''
            The tie type, can be 'start', 'stop', 'continue', 'let-ring', or 'continue-let-ring'.
            ''',
        'style': '''
            The style of the tie. Currently can be 'normal', 'dotted', 'dashed' or 'hidden'
            ''',
        'placement': '''
            Whether the tie should go up or down. Can be None, meaning
            it is unknown or should be determined from context, or 'above' or 'below.
            ''',
    }

    VALID_TIE_TYPES = ('start', 'stop', 'continue', 'let-ring', 'continue-let-ring')

    # pylint: disable=redefined-builtin
    def __init__(self, type='start'):  # @ReservedAssignment
        # super().__init__()  # no need for ProtoM21Object or SlottedObjectMixin
        # Validate eagerly so a bad tie type fails at construction time.
        if type not in self.VALID_TIE_TYPES:
            raise TieException(
                f'Type must be one of {self.VALID_TIE_TYPES}, not {type}')
        # naming this 'type' was a mistake, because cannot create a property of this name.
        self.id = id(self)
        self.type = type
        self.style = 'normal'
        self.placement = None  # = unknown, can be 'above' or 'below'

    # SPECIAL METHODS #

    def __eq__(self, other):
        # noinspection PyComparisonWithNone
        '''
        Equality. Based entirely on Tie.type.

        >>> t1 = tie.Tie('start')
        >>> t2 = tie.Tie('start')
        >>> t3 = tie.Tie('stop')
        >>> t1 == t2
        True

        >>> t2 == t3, t3 == t1
        (False, False)

        >>> t2 == None
        False
        '''
        # None and anything that is not a Tie fail the isinstance check.
        return isinstance(other, Tie) and self.type == other.type

    def _reprInternal(self):
        return self.type
class Test(unittest.TestCase):
    '''Placeholder test case; the module's doctests are exercised via music21.mainTest.'''
    pass
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Run this module's Test case (and doctests) through music21's test runner.
    import music21
    music21.mainTest(Test)
| 32.122581
| 97
| 0.572404
|
4a064bb5bad3204ff0c430eb42466cb49537b842
| 2,098
|
py
|
Python
|
tasks/app/_utils.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 1,420
|
2015-11-20T01:25:14.000Z
|
2022-03-22T03:51:33.000Z
|
tasks/app/_utils.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 151
|
2016-01-07T09:11:42.000Z
|
2020-11-17T08:37:07.000Z
|
tasks/app/_utils.py
|
IsmaelJS/test-github-actions
|
97223df261e9736c46875f590c9593dbac0d417b
|
[
"MIT"
] | 389
|
2015-11-23T01:14:31.000Z
|
2022-02-07T08:23:11.000Z
|
# encoding: utf-8
"""
Invoke tasks utilities for apps.
"""
import functools
from invoke import Task as BaseTask
class Task(BaseTask):
    """
    A patched Invoke Task adding support for decorated functions.
    """

    def __init__(self, *args, **kwargs):
        super(Task, self).__init__(*args, **kwargs)
        # Make these tasks always contextualized (this is the only option in
        # Invoke >=0.13), so we just backport this default on Invoke 0.12.
        self.contextualized = True

    def argspec(self, body):
        """
        See details in https://github.com/pyinvoke/invoke/pull/399.
        """
        # Unwind functools.wraps-style wrappers iteratively instead of recursing.
        while hasattr(body, '__wrapped__'):
            body = body.__wrapped__
        return super(Task, self).argspec(body)
def app_context_task(*args, **kwargs):
    """
    A helper Invoke Task decorator with auto app context activation.

    Examples:

    >>> @app_context_task
    ... def my_task(context, some_arg, some_option='default'):
    ...     print("Done")

    >>> @app_context_task(
    ...     help={'some_arg': "This is something useful"}
    ... )
    ... def my_task(context, some_arg, some_option='default'):
    ...     print("Done")
    """
    if len(args) != 1:
        # Called with options only: defer until the decorated function arrives.
        return lambda func: app_context_task(func, **kwargs)

    func = args[0]

    @functools.wraps(func)
    def wrapper(*wrapper_args, **wrapper_kwargs):
        """
        A wrapped which tries to get ``app`` from ``kwargs`` or creates a
        new ``app`` otherwise, and actives the application context, so the
        decorated function is run inside the application context.
        """
        app = wrapper_kwargs.pop('app', None)
        if app is None:
            from app import create_app
            app = create_app()
        with app.app_context():
            return func(*wrapper_args, **wrapper_kwargs)

    # This is the default in Python 3, so we just make it backwards
    # compatible with Python 2
    if not hasattr(wrapper, '__wrapped__'):
        wrapper.__wrapped__ = func

    return Task(wrapper, **kwargs)
| 29.971429
| 78
| 0.589609
|
4a064bcb290b611f8091f4d045cb4ab31651cc6f
| 1,746
|
py
|
Python
|
tests/Unit/Elliptic/Systems/Elasticity/Equations.py
|
nilsvu/spectre
|
1455b9a8d7e92db8ad600c66f54795c29c3052ee
|
[
"MIT"
] | 117
|
2017-04-08T22:52:48.000Z
|
2022-03-25T07:23:36.000Z
|
tests/Unit/Elliptic/Systems/Elasticity/Equations.py
|
GitHimanshuc/spectre
|
4de4033ba36547113293fe4dbdd77591485a4aee
|
[
"MIT"
] | 3,177
|
2017-04-07T21:10:18.000Z
|
2022-03-31T23:55:59.000Z
|
tests/Unit/Elliptic/Systems/Elasticity/Equations.py
|
geoffrey4444/spectre
|
9350d61830b360e2d5b273fdd176dcc841dbefb0
|
[
"MIT"
] | 85
|
2017-04-07T19:36:13.000Z
|
2022-03-01T10:21:00.000Z
|
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def constitutive_relation_2d(strain, bulk_modulus, shear_modulus):
    """Planar isotropic constitutive relation: negative stress from the strain."""
    lame_constant = bulk_modulus - 2. / 3. * shear_modulus
    # Effective (plane-stress-reduced) Lame coefficient for the trace term.
    effective_lame = 2. * shear_modulus * lame_constant / (lame_constant + 2. * shear_modulus)
    return -effective_lame * np.trace(strain) * np.eye(2) - 2. * shear_modulus * strain
def constitutive_relation_3d(strain, bulk_modulus, shear_modulus):
    """3D isotropic constitutive relation (negative stress convention)."""
    lame_constant = bulk_modulus - 2. / 3. * shear_modulus
    trace_part = lame_constant * np.trace(strain) * np.eye(3)
    shear_part = 2. * shear_modulus * strain
    return -shear_part - trace_part
def primal_fluxes_2d(strain, coordinates, bulk_modulus, shear_modulus):
    """Primal fluxes are minus the 2D stress; `coordinates` is unused here (flat space)."""
    stress = constitutive_relation_2d(strain, bulk_modulus, shear_modulus)
    return -stress
def primal_fluxes_3d(strain, coordinates, bulk_modulus, shear_modulus):
    """Primal fluxes are minus the 3D stress; `coordinates` is unused here (flat space)."""
    stress = constitutive_relation_3d(strain, bulk_modulus, shear_modulus)
    return -stress
def add_curved_sources(christoffel_second_kind, christoffel_contracted,
                       stress):
    """Source terms from contracting Christoffel symbols with the stress tensor."""
    contracted_part = np.einsum('i,ij', christoffel_contracted, stress)
    symbol_part = np.einsum('ijk,jk', christoffel_second_kind, stress)
    return -contracted_part - symbol_part
def auxiliary_fluxes(displacement):
    """Kronecker-delta tensor product with the displacement, symmetrized in the
    last two indices: F[i,j,k] = 0.5 * (delta_ij u_k + delta_ik u_j)."""
    dim = len(displacement)
    outer = np.tensordot(np.eye(dim), displacement, axes=0)
    return 0.5 * (outer + outer.transpose(0, 2, 1))
def curved_auxiliary_fluxes(metric, displacement):
    """Auxiliary fluxes with the displacement index lowered by the metric first."""
    lowered = np.einsum('ij,j', metric, displacement)
    return auxiliary_fluxes(lowered)
def add_curved_auxiliary_sources(christoffel_first_kind, displacement):
    """Contract the first index of the Christoffel symbols with the displacement."""
    # Equivalent to np.einsum('ijk,i', ...): sum over the leading index.
    return np.tensordot(displacement, christoffel_first_kind, axes=([0], [0]))
| 35.632653
| 79
| 0.736541
|
4a064c67dccaeea9b2e3db4748f8772b62ef20e0
| 5,221
|
py
|
Python
|
Fourier/MellinTransform/Mellin_NegTempStable_European_Price.py
|
mattslezak-shell/PROJ_Option_Pricing_Matlab
|
6105bd00ba3471802180c122fdf81e90833a91c4
|
[
"MIT"
] | null | null | null |
Fourier/MellinTransform/Mellin_NegTempStable_European_Price.py
|
mattslezak-shell/PROJ_Option_Pricing_Matlab
|
6105bd00ba3471802180c122fdf81e90833a91c4
|
[
"MIT"
] | null | null | null |
Fourier/MellinTransform/Mellin_NegTempStable_European_Price.py
|
mattslezak-shell/PROJ_Option_Pricing_Matlab
|
6105bd00ba3471802180c122fdf81e90833a91c4
|
[
"MIT"
] | 1
|
2022-01-07T15:31:45.000Z
|
2022-01-07T15:31:45.000Z
|
# Generated with SMOP 0.41-beta
try:
from smop.libsmop import *
except ImportError:
raise ImportError('File compiled with `smop3`, please install `smop3` to run it.') from None
# Mellin_NegTempStable_European_Price.m
# SMOP-generated translation of Mellin_NegTempStable_European_Price.m.
# `dot`, `copy`, `arange`, and the `@function` decorator come from smop.libsmop;
# the `.m:NN` comments map each statement back to the MATLAB source line.
@function
def Mellin_NegTempStable_European_Price(S_0=None,W=None,T=None,r=None,q=None,call=None,sigma=None,alpha=None,lambda_=None,N1=None,tol=None,*args,**kwargs):
    varargin = Mellin_NegTempStable_European_Price.varargin
    nargin = Mellin_NegTempStable_European_Price.nargin

    #########################################################
    # About: Pricing Function for European Options using Mellin Transform
    # Models Supported: Negative Tempered Stable
    # Returns: price of contract
    # Author: Justin Lars Kirkby/ Jean-Philippe Aguilar
    # Reference: 1) "Closed-form option pricing in exponential Levy models", Aguilar and Kirkby, 2021
    # ----------------------
    # Contract/Model Params
    # ----------------------
    # S_0 = initial stock price (e.g. 100)
    # W = strike (e.g. 100)
    # r = interest rate (e.g. 0.05)
    # q = dividend yield (e.g. 0.05)
    # T = time remaining until maturity (in years, e.g. T=1)
    # call = 1 for call (else put)
    # sigma = param in model
    # alpha = param in model
    # lambda = param in model
    # ----------------------
    # Numerical Params
    # ----------------------
    # N1 = maximum number summation terms in the series, will sum fewer terms
    # if error threshold (tol) is reached
    # tol = desired error threshold of price (will stop adding terms once satisfied)
    #########################################################
    # MATLAB-style default: tol defaults to 0 (never converge early) when omitted.
    if nargin < 11:
        tol=0
# Mellin_NegTempStable_European_Price.m:32
    N2=copy(N1)
# Mellin_NegTempStable_European_Price.m:35
    N3=copy(N1)
# Mellin_NegTempStable_European_Price.m:36
    # Discounted strike.
    F=dot(W,exp(dot(- r,T)))
# Mellin_NegTempStable_European_Price.m:38
    w_=sigma ** alpha / cos(dot(alpha,pi) / 2)
# Mellin_NegTempStable_European_Price.m:39
    wts_=dot(w_,((lambda_ + 1) ** alpha - lambda_ ** alpha))
# Mellin_NegTempStable_European_Price.m:40
    # NOTE(review): c_ is computed but never used below — confirm against the .m source.
    c_=- w_ / gamma(- alpha)
# Mellin_NegTempStable_European_Price.m:41
    # Log-forward-moneyness adjusted by the martingale correction wts_.
    k_=log(S_0 / W) + dot((r - q + wts_),T)
# Mellin_NegTempStable_European_Price.m:43
    sum=0
# Mellin_NegTempStable_European_Price.m:44
    last=0
# Mellin_NegTempStable_European_Price.m:45
    cons=dot(F,exp(dot(dot(lambda_ ** alpha,w_),T))) / alpha
# Mellin_NegTempStable_European_Price.m:46
    # Rescale tol so the convergence test applies to the final (cons * sum) price.
    tol=tol / cons
# Mellin_NegTempStable_European_Price.m:47
    wt=dot(- w_,T)
# Mellin_NegTempStable_European_Price.m:49
    start_N3=1
# Mellin_NegTempStable_European_Price.m:50
    # Triple series of the Mellin transform representation.
    for n1 in arange(0,N1).reshape(-1):
        fn1=factorial(n1)
# Mellin_NegTempStable_European_Price.m:53
        for n2 in arange(0,N2).reshape(-1):
            fn2=factorial(n2)
# Mellin_NegTempStable_European_Price.m:55
            for n3 in arange(start_N3,N3).reshape(-1):
                g=pochhammer(1 - n1 + n3,n2)
# Mellin_NegTempStable_European_Price.m:57
                c=(n1 - n2 - n3) / alpha
# Mellin_NegTempStable_European_Price.m:58
                term=dot(dot(dot(g,lambda_ ** n2),k_ ** n1),wt ** (- c)) / (dot(dot(fn1,fn2),gamma(1 - c)))
# Mellin_NegTempStable_European_Price.m:59
                sum=sum + term
# Mellin_NegTempStable_European_Price.m:60
        # NOTE(review): the source indentation was lost in extraction; the m-file line
        # numbering (check near m:63, last at m:66) places this early-exit test at the
        # n1 level — confirm against the original MATLAB file.
        if n1 > 1 and abs(sum - last) < tol:
            break
        last=copy(sum)
# Mellin_NegTempStable_European_Price.m:66
    price=dot(cons,sum)
# Mellin_NegTempStable_European_Price.m:69
    # Put price via put-call parity.
    if call != 1:
        price=price - (dot(S_0,exp(dot(- q,T))) - dot(W,exp(dot(- r,T))))
# Mellin_NegTempStable_European_Price.m:72
    return price

if __name__ == '__main__':
    pass
# Pochhammer (rising factorial) symbol (a)_n, with MATLAB-style edge cases,
# delegating to neg_poch for negative integer a. SMOP-generated.
@function
def pochhammer(a=None,n=None,*args,**kwargs):
    varargin = pochhammer.varargin
    nargin = pochhammer.nargin

    if (a == 0 and n <= 0) or (n == 0 and a > 0):
        p=1
# Mellin_NegTempStable_European_Price.m:80
    else:
        if a == 0 and n > 0:
            p=0
# Mellin_NegTempStable_European_Price.m:82
        else:
            if a > 0:
                if n == 1:
                    p=copy(a)
# Mellin_NegTempStable_European_Price.m:85
                else:
                    if n > 0:
                        # (a)_n = a*(a+1)*...*(a+n-1) as a product over the range.
                        p=prod(arange(a,a + n - 1))
# Mellin_NegTempStable_European_Price.m:87
                    else:
                        p=copy(inf)
# Mellin_NegTempStable_European_Price.m:90
            else:
                # Negative a: closed form handled separately.
                p=neg_poch(a,n)
# Mellin_NegTempStable_European_Price.m:93
    return p

if __name__ == '__main__':
    pass
# Pochhammer symbol for a negative integer first argument. SMOP-generated.
@function
def neg_poch(m=None,n=None,*args,**kwargs):
    varargin = neg_poch.varargin
    nargin = neg_poch.nargin

    # Used for (-m)_n, m >= 1
    m=- m
# Mellin_NegTempStable_European_Price.m:101
    # (-m)_n vanishes once n exceeds m; otherwise use the factorial closed form
    # (-m)_n = (-1)^n * m! / (m - n)!.
    if n > m:
        p=0
# Mellin_NegTempStable_European_Price.m:104
    else:
        p=dot((- 1) ** n,factorial(m)) / factorial(m - n)
# Mellin_NegTempStable_European_Price.m:106
    return p

if __name__ == '__main__':
    pass
| 32.228395
| 156
| 0.600843
|
4a064c6c341d973fe8510d6320981bffdb6fd74e
| 1,889
|
py
|
Python
|
setup.py
|
tsammalex/pytsammalex
|
9686ca580617b94f0d6d7ef61a3ad66082a98319
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
tsammalex/pytsammalex
|
9686ca580617b94f0d6d7ef61a3ad66082a98319
|
[
"Apache-2.0"
] | 2
|
2020-10-06T10:17:13.000Z
|
2020-10-12T18:37:52.000Z
|
setup.py
|
tsammalex/pytsammalex
|
9686ca580617b94f0d6d7ef61a3ad66082a98319
|
[
"Apache-2.0"
] | null | null | null |
"""Packaging configuration for pytsammalex."""
from setuptools import setup, find_packages

# Read the long description with an explicit encoding and a closed file handle;
# the original `open('README.md').read()` leaked the handle and depended on the
# locale's default encoding.
with open('README.md', encoding='utf-8') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(
    name='pytsammalex',
    version='0.1.1.dev0',
    author='Robert Forkel',
    author_email='forkel@shh.mpg.de',
    description='Python library',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    keywords='',
    license='Apache 2.0',
    url='https://github.com/tsammalex/pytsammalex',
    # Source lives under src/ (the "src layout").
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'tsammalex=pytsammalex.__main__:main',
        ],
    },
    platforms='any',
    python_requires='>=3.5',
    install_requires=[
        'appdirs',
        'pygbif',
        'csvw>=1.5.6',
        'clldutils>=3.5',
        'attrs',
        'nameparser',
        'requests',
        'newick',
    ],
    extras_require={
        'lexibank': [
            'pylexibank',
        ],
        'clld': [
            'pycldf>=1.16',
            'clld',
        ],
        'dev': ['flake8', 'wheel', 'twine'],
        'test': [
            'mock',
            'pytest>=3.6',
            'pytest-mock',
            'pytest-cov',
            'coverage>=4.2',
        ],
    },
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
)
| 27.779412
| 70
| 0.532028
|
4a064ec7a95fd49534b506249a30dd00e2d90a46
| 5,413
|
py
|
Python
|
projects/vdk-plugins/vdk-kerberos-auth/src/vdk/plugin/kerberos/kerberos_configuration.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 100
|
2021-10-04T09:32:04.000Z
|
2022-03-30T11:23:53.000Z
|
projects/vdk-plugins/vdk-kerberos-auth/src/vdk/plugin/kerberos/kerberos_configuration.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 208
|
2021-10-04T16:56:40.000Z
|
2022-03-31T10:41:44.000Z
|
projects/vdk-plugins/vdk-kerberos-auth/src/vdk/plugin/kerberos/kerberos_configuration.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 14
|
2021-10-11T14:15:13.000Z
|
2022-03-11T13:39:17.000Z
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
from pathlib import Path
from typing import Optional
from vdk.internal.core import errors
from vdk.internal.core.config import Configuration
from vdk.internal.core.config import ConfigurationBuilder
KRB_AUTH = "KRB_AUTH"
KEYTAB_FOLDER = "KEYTAB_FOLDER"
KRB5_CONF_FILENAME = "KRB5_CONF_FILENAME"
KEYTAB_FILENAME = "KEYTAB_FILENAME"
KEYTAB_PRINCIPAL = "KEYTAB_PRINCIPAL"
KEYTAB_REALM = "KEYTAB_REALM"
KERBEROS_KDC_HOST = "KERBEROS_KDC_HOST"
KRB_AUTH_FAIL_FAST = "KRB_AUTH_FAIL_FAST"
class KerberosPluginConfiguration:
    """Typed read accessor over the Kerberos-related configuration keys.

    Values that are not configured explicitly are, where possible, derived
    from the data job name and data job directory supplied at construction
    time (e.g. '<job name>.keytab' in the job's parent folder).
    """

    def __init__(
        self,
        job_name: Optional[str],
        job_directory: Optional[str],
        config: Configuration,
    ):
        self.__job_name = job_name
        self.__job_directory = job_directory
        self.__config = config

    def authentication_type(self):
        # 'minikerberos', 'kinit' or None (authentication disabled).
        return self.__config.get_value(KRB_AUTH)

    def keytab_folder(self):
        """Configured keytab folder, defaulting to the parent of the job directory."""
        configured = self.__config.get_value(KEYTAB_FOLDER)
        if configured is not None:
            return configured
        return Path(self.__job_directory).parent if self.__job_directory else None

    def keytab_filename(self):
        """Configured keytab file name, defaulting to '<job name>.keytab'."""
        configured = self.__config.get_value(KEYTAB_FILENAME)
        if configured is not None:
            return configured
        return f"{self.__job_name}.keytab" if self.__job_name else None

    def keytab_pathname(self):
        """Full path of the keytab file.

        Raises a configuration error (via errors.log_and_throw) when the
        file name cannot be determined from configuration or job context.
        """
        filename = self.keytab_filename()
        if not filename:
            errors.log_and_throw(
                to_be_fixed_by=errors.ResolvableBy.CONFIG_ERROR,
                log=logging.getLogger(__name__),
                what_happened="Cannot find keytab file location.",
                why_it_happened="Keytab filename cannot be inferred from configuration.",
                consequences=errors.MSG_CONSEQUENCE_DELEGATING_TO_CALLER__LIKELY_EXECUTION_FAILURE,
                countermeasures="Provide configuration variables KEYTAB_FILENAME KEYTAB_FOLDER. "
                "During vdk run they are automatically inferred from data job location "
                "but for other commands they need to be explicitly set.",
            )
        folder = self.keytab_folder()
        return os.path.join(folder, filename) if folder else filename

    def krb5_conf_filename(self):
        return self.__config.get_value(KRB5_CONF_FILENAME)

    def keytab_principal(self):
        """Configured principal, defaulting to 'pa__view_<job name>'."""
        configured = self.__config.get_value(KEYTAB_PRINCIPAL)
        if configured is not None:
            return configured
        return f"pa__view_{self.__job_name}" if self.__job_name else None

    def keytab_realm(self):
        return self.__config.get_value(KEYTAB_REALM)

    def kerberos_host(self):
        return self.__config.get_value(KERBEROS_KDC_HOST)

    def auth_fail_fast(self) -> bool:
        return self.__config.get_value(KRB_AUTH_FAIL_FAST)
def add_definitions(config_builder: ConfigurationBuilder) -> None:
    """Register every Kerberos-related configuration key with its default
    value and help text so the vdk configuration system can resolve them."""
    config_builder.add(
        key=KRB_AUTH,
        default_value=None,
        description="Specifies the Kerberos authentication type to use. "
        "Possible values are 'minikerberos' and 'kinit'. "
        "If left empty, the authentication is disabled.",
    )
    config_builder.add(
        key=KEYTAB_FILENAME,
        default_value=None,
        description="Specifies the name of the keytab file. "
        "If left empty, the name of the keytab file is assumed to be the same "
        "as the name of the data job with '.keytab' suffix.",
    )
    config_builder.add(
        key=KEYTAB_FOLDER,
        default_value=None,
        description="Specifies the folder containing the keytab file. "
        "If left empty, the keytab file is expected to be located inside the data job folder.",
    )
    config_builder.add(
        key=KRB5_CONF_FILENAME,
        # Default to the krb5.conf bundled next to this module.
        default_value=os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "krb5.conf"
        ),
        description="Specifies the path to the krb5.conf file that should supply Kerberos configuration.",
    )
    config_builder.add(
        key=KEYTAB_PRINCIPAL,
        default_value=None,
        description="Specifies the Kerberos principal. "
        "If left empty, the principal will be the job name prepended with 'pa__view_'.",
    )
    config_builder.add(
        key=KEYTAB_REALM,
        default_value="default_realm",
        description="Specifies the Kerberos realm. This value is used only with "
        "the 'minikerberos' authentication type.",
    )
    config_builder.add(
        key=KERBEROS_KDC_HOST,
        default_value="localhost",
        description="Specifies the name of the Kerberos KDC (Key Distribution Center) host. "
        "This value is used only with the 'minikerberos' authentication type.",
    )
    config_builder.add(
        key=KRB_AUTH_FAIL_FAST,
        default_value=False,
        description="Specify if the authenticator must raise exception "
        "if it fails to successfully authenticate with kerberos (basically - kinit). "
        "If set to false, only warning will be logged on authentication failure. "
        "Subsequent kerberos related requests may fail but that'd fail lazily (on demand) "
        "that makes it possible for non-kerberos related features to work.",
    )
| 38.664286
| 106
| 0.69056
|
4a064ef6213a0e6fed00e84243492f2567507769
| 5,990
|
py
|
Python
|
order_food_online_project/notes/views.py
|
MaksNech/pylab2018_ht_12
|
3578e36570ce99b25136942320fbcd7df956d435
|
[
"MIT"
] | null | null | null |
order_food_online_project/notes/views.py
|
MaksNech/pylab2018_ht_12
|
3578e36570ce99b25136942320fbcd7df956d435
|
[
"MIT"
] | null | null | null |
order_food_online_project/notes/views.py
|
MaksNech/pylab2018_ht_12
|
3578e36570ce99b25136942320fbcd7df956d435
|
[
"MIT"
] | 1
|
2019-03-15T03:36:34.000Z
|
2019-03-15T03:36:34.000Z
|
from .forms import NoteAddForm
from .models import Note, NotedModel
from foods.models import Dish, Order
from decimal import Decimal
from django.shortcuts import render
from django.urls import reverse
from django.http import Http404
from django.http import HttpResponseRedirect
from django.db.models import Q
def note_list(request):
    """Render the list page showing every note in the database."""
    all_notes = Note.objects.all()
    context = {'notes': all_notes}
    return render(request, 'notes/note_list.html', context=context)
def note_search(request):
    """Render the note list filtered by a free-text query.

    The ``query`` GET parameter is matched case-insensitively against note
    titles and descriptions.

    Fixes: the original used ``request.GET['query']`` (KeyError when the
    parameter is absent) and returned ``None`` for an empty query, which
    Django turns into a server error.  A valid response is now always
    produced; an empty/missing query yields an empty result list.
    """
    query = request.GET.get('query', '')
    if query:
        notes = Note.objects.filter(
            Q(title__icontains=query) | Q(description__icontains=query)
        )
    else:
        notes = Note.objects.none()
    return render(request, 'notes/note_list.html', context={'notes': notes})
def note_add(request):
    """Create a new note, optionally attached to a Dish or an Order.

    Requires the ``notes.add_note`` permission; other users are redirected
    to the permission-denied page.

    Fixes: ``model_instance`` was previously unbound when neither select was
    submitted, raising UnboundLocalError at ``NotedModel.objects.create``.
    It is now initialised to None and the attachment record is only created
    when a target model was actually selected.
    """
    if not request.user.has_perm('notes.add_note'):
        request.session['permission_codename'] = 'add_note'
        return HttpResponseRedirect(reverse('permission_denied'))

    if request.method == "POST":
        # The template exposes two mutually exclusive selects; when both are
        # posted the Order selection takes precedence (matches original order).
        model_instance = None
        dish_id = request.POST.get("noted_dish_select")
        order_id = request.POST.get("noted_order_select")
        if dish_id:
            model_instance = Dish.objects.get(id=dish_id)
        if order_id:
            model_instance = Order.objects.get(id=order_id)
        form = NoteAddForm(request.POST, request.FILES)
        if form.is_valid():
            new_note = form.save()
            if model_instance is not None:
                NotedModel.objects.create(content_object=model_instance, note=new_note)
            return HttpResponseRedirect(reverse('note_view', kwargs={'note_id': new_note.id}))
    # GET request, or invalid form: (re-)render the add page with a fresh form.
    form = NoteAddForm()
    dishes = Dish.objects.all()
    orders = Order.objects.all()
    return render(request, 'notes/note_add.html', context={'form': form, 'dishes': dishes, 'orders': orders})
def note_view(request, note_id):
    """Render the detail page for one note, together with the Dish/Order
    instance it is attached to (when any)."""
    note = Note.objects.get(id=note_id)
    try:
        attachment = NotedModel.objects.get(note__id=note.id)
    except NotedModel.DoesNotExist:
        attachment = None
    instance = None
    instance_class = None
    if attachment:
        target_cls = attachment.content_object.__class__
        instance = target_cls.objects.get(id=attachment.content_object.id)
        instance_class = str(target_cls.__name__.lower())
    if note:
        return render(
            request,
            'notes/note_view.html',
            context={
                'note': note,
                'model_instance': instance,
                'model_instance_class': instance_class,
            },
        )
def note_edit(request, note_id):
    """Edit an existing note and re-point its Dish/Order attachment.

    Requires the ``notes.change_note`` permission.  On POST the note is
    saved and its NotedModel attachment is recreated; on GET the edit form
    is rendered with the selects pre-configured for the current attachment.
    """
    if request.user.has_perm('notes.change_note'):
        note = Note.objects.get(id=note_id)
        if note:
            if request.method == "POST":
                # Two mutually exclusive selects; an Order selection wins
                # when both are submitted.
                dish_id = request.POST.get("noted_dish_select")
                order_id = request.POST.get("noted_order_select")
                model_instance = None
                if dish_id:
                    model_instance = Dish.objects.get(id=dish_id)
                if order_id:
                    model_instance = Order.objects.get(id=order_id)
                form = NoteAddForm(request.POST, request.FILES, instance=note)
                if form.is_valid():
                    edit_note = form.save()
                    # Replace any previous attachment with the new target.
                    NotedModel.objects.filter(note__id=edit_note.id).delete()
                    NotedModel.objects.create(content_object=model_instance, note=edit_note)
                    return HttpResponseRedirect(reverse('note_view', kwargs={'note_id': edit_note.id}))
            # GET request, or invalid form: render the edit page.
            form = NoteAddForm()
            dishes = Dish.objects.all()
            orders = Order.objects.all()
            try:
                noted_model = NotedModel.objects.get(note__id=note.id)
            except NotedModel.DoesNotExist:
                noted_model = None
            # Per-select template state: which select is pre-filled,
            # disabled and required, based on the current attachment type.
            dish_id = dish_select_disabled = dish_select_required = None
            order_id = order_select_disabled = order_select_required = None
            if noted_model:
                if noted_model.content_object.__class__.__name__ == 'Dish':
                    dish_id = noted_model.content_object.id
                    order_id = None
                    dish_select_disabled = False
                    dish_select_required = True
                    order_select_disabled = True
                    order_select_required = False
                if noted_model.content_object.__class__.__name__ == 'Order':
                    order_id = noted_model.content_object.id
                    dish_id = None
                    dish_select_disabled = True
                    dish_select_required = False
                    order_select_disabled = False
                    order_select_required = True
            return render(request, 'notes/note_edit.html',
                          context={'note': note,
                                   'form': form,
                                   'dishes': dishes,
                                   'orders': orders,
                                   'dish_id': dish_id,
                                   'order_id': order_id,
                                   'dish_select_disabled': dish_select_disabled,
                                   'dish_select_required': dish_select_required,
                                   'order_select_disabled': order_select_disabled,
                                   'order_select_required': order_select_required,
                                   })
    else:
        request.session['permission_codename'] = 'change_note'
        return HttpResponseRedirect(reverse('permission_denied'))
def note_delete(request, note_id):
    """Delete a note (POST only) and redirect to the note list.

    Requires the ``notes.delete_note`` permission; other users are
    redirected to the permission-denied page.

    Fixes: a non-POST request on an existing note previously fell through
    and returned ``None``, which Django turns into a server error.  Such
    requests now redirect back to the note's detail page instead.
    """
    if not request.user.has_perm('notes.delete_note'):
        request.session['permission_codename'] = 'delete_note'
        return HttpResponseRedirect(reverse('permission_denied'))
    note = Note.objects.get(id=note_id)
    if note and request.method == "POST":
        note.delete()
        return HttpResponseRedirect(reverse('note_list'))
    return HttpResponseRedirect(reverse('note_view', kwargs={'note_id': note_id}))
| 41.027397
| 113
| 0.590317
|
4a064f435e48ed91d937a95fb6e176bfce48f4bd
| 10,812
|
py
|
Python
|
tests/test_interface_type.py
|
mirumee/ariadne-graphql-modules
|
f95a48f428a49aa39d41ec91a2dac647b6869f86
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_interface_type.py
|
mirumee/ariadne-graphql-modules
|
f95a48f428a49aa39d41ec91a2dac647b6869f86
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_interface_type.py
|
mirumee/ariadne-graphql-modules
|
f95a48f428a49aa39d41ec91a2dac647b6869f86
|
[
"BSD-3-Clause"
] | null | null | null |
from dataclasses import dataclass
import pytest
from ariadne import SchemaDirectiveVisitor
from graphql import GraphQLError, graphql_sync
from ariadne_graphql_modules import (
DeferredType,
DirectiveType,
InterfaceType,
ObjectType,
make_executable_schema,
)
def test_interface_type_raises_attribute_error_when_defined_without_schema(snapshot):
with pytest.raises(AttributeError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
pass
snapshot.assert_match(err)
def test_interface_type_raises_error_when_defined_with_invalid_schema_type(snapshot):
with pytest.raises(TypeError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = True
snapshot.assert_match(err)
def test_interface_type_raises_error_when_defined_with_invalid_schema_str(snapshot):
with pytest.raises(GraphQLError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = "interfaco Example"
snapshot.assert_match(err)
def test_interface_type_raises_error_when_defined_with_invalid_graphql_type_schema(
snapshot,
):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = "type Example"
snapshot.assert_match(err)
def test_interface_type_raises_error_when_defined_with_multiple_types_schema(snapshot):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example
interface Other
"""
snapshot.assert_match(err)
def test_interface_type_raises_error_when_defined_without_fields(snapshot):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = "interface Example"
snapshot.assert_match(err)
def test_interface_type_extracts_graphql_name():
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
id: ID!
}
"""
assert ExampleInterface.graphql_name == "Example"
def test_interface_type_raises_error_when_defined_without_return_type_dependency(
snapshot,
):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
group: Group
groups: [Group!]
}
"""
snapshot.assert_match(err)
def test_interface_type_verifies_field_dependency():
# pylint: disable=unused-variable
class GroupType(ObjectType):
__schema__ = """
type Group {
id: ID!
}
"""
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
group: Group
groups: [Group!]
}
"""
__requires__ = [GroupType]
def test_interface_type_verifies_circural_dependency():
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
parent: Example
}
"""
def test_interface_type_raises_error_when_defined_without_argument_type_dependency(
snapshot,
):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
actions(input: UserInput): [String!]!
}
"""
snapshot.assert_match(err)
def test_interface_type_verifies_circular_dependency_using_deferred_type():
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
id: ID!
users: [User]
}
"""
__requires__ = [DeferredType("User")]
class UserType(ObjectType):
__schema__ = """
type User {
roles: [Example]
}
"""
__requires__ = [ExampleInterface]
def test_interface_type_can_be_extended_with_new_fields():
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
id: ID!
}
"""
class ExtendExampleInterface(InterfaceType):
__schema__ = """
extend interface Example {
name: String
}
"""
__requires__ = [ExampleInterface]
def test_interface_type_can_be_extended_with_directive():
# pylint: disable=unused-variable
class ExampleDirective(DirectiveType):
__schema__ = "directive @example on INTERFACE"
__visitor__ = SchemaDirectiveVisitor
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
id: ID!
}
"""
class ExtendExampleInterface(InterfaceType):
__schema__ = """
extend interface Example @example
"""
__requires__ = [ExampleInterface, ExampleDirective]
def test_interface_type_can_be_extended_with_other_interface():
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface Example {
id: ID!
}
"""
class OtherInterface(InterfaceType):
__schema__ = """
interface Other {
depth: Int!
}
"""
class ExtendExampleInterface(InterfaceType):
__schema__ = """
extend interface Example implements Other
"""
__requires__ = [ExampleInterface, OtherInterface]
def test_interface_type_raises_error_when_defined_without_extended_dependency(snapshot):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExtendExampleInterface(ObjectType):
__schema__ = """
extend interface Example {
name: String
}
"""
snapshot.assert_match(err)
def test_interface_type_raises_error_when_extended_dependency_is_wrong_type(snapshot):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleType(ObjectType):
__schema__ = """
type Example {
id: ID!
}
"""
class ExampleInterface(InterfaceType):
__schema__ = """
extend interface Example {
name: String
}
"""
__requires__ = [ExampleType]
snapshot.assert_match(err)
def test_interface_type_raises_error_when_defined_with_alias_for_nonexisting_field(
snapshot,
):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface User {
name: String
}
"""
__aliases__ = {
"joinedDate": "joined_date",
}
snapshot.assert_match(err)
def test_interface_type_raises_error_when_defined_with_resolver_for_nonexisting_field(
snapshot,
):
with pytest.raises(ValueError) as err:
# pylint: disable=unused-variable
class ExampleInterface(InterfaceType):
__schema__ = """
interface User {
name: String
}
"""
@staticmethod
def resolve_group(*_):
return None
snapshot.assert_match(err)
@dataclass
class User:
    """Plain fixture record resolved through the Result interface as a 'User'."""

    id: int
    name: str
    summary: str
@dataclass
class Comment:
    """Plain fixture record resolved through the Result interface as a 'Comment'."""

    id: int
    message: str
    summary: str
class ResultInterface(InterfaceType):
    """GraphQL 'Result' interface used by the executable-schema tests below.

    Dispatches concrete types by Python instance class and supplies a
    default ``score`` resolver that implementing types may override.
    """

    __schema__ = """
    interface Result {
        summary: String!
        score: Int!
    }
    """

    @staticmethod
    def resolve_type(instance, *_):
        # Map fixture dataclasses to GraphQL type names; None lets GraphQL
        # report an unresolvable type.
        if isinstance(instance, Comment):
            return "Comment"
        if isinstance(instance, User):
            return "User"
        return None

    @staticmethod
    def resolve_score(*_):
        # Default score inherited by implementing types (UserType keeps it;
        # CommentType overrides it with 16).
        return 42
class UserType(ObjectType):
__schema__ = """
type User implements Result {
id: ID!
name: String!
summary: String!
score: Int!
}
"""
__requires__ = [ResultInterface]
class CommentType(ObjectType):
__schema__ = """
type Comment implements Result {
id: ID!
message: String!
summary: String!
score: Int!
}
"""
__requires__ = [ResultInterface]
@staticmethod
def resolve_score(*_):
return 16
class QueryType(ObjectType):
__schema__ = """
type Query {
results: [Result!]!
}
"""
__requires__ = [ResultInterface]
@staticmethod
def resolve_results(*_):
return [
User(id=1, name="Alice", summary="Summary for Alice"),
Comment(id=1, message="Hello world!", summary="Summary for comment"),
]
schema = make_executable_schema(QueryType, UserType, CommentType)
def test_interface_type_binds_type_resolver():
query = """
query {
results {
... on User {
__typename
id
name
summary
}
... on Comment {
__typename
id
message
summary
}
}
}
"""
result = graphql_sync(schema, query)
assert result.data == {
"results": [
{
"__typename": "User",
"id": "1",
"name": "Alice",
"summary": "Summary for Alice",
},
{
"__typename": "Comment",
"id": "1",
"message": "Hello world!",
"summary": "Summary for comment",
},
],
}
def test_interface_type_binds_field_resolvers_to_implementing_types_fields():
query = """
query {
results {
... on User {
__typename
score
}
... on Comment {
__typename
score
}
}
}
"""
result = graphql_sync(schema, query)
assert result.data == {
"results": [
{
"__typename": "User",
"score": 42,
},
{
"__typename": "Comment",
"score": 16,
},
],
}
| 23.973392
| 88
| 0.583333
|
4a064f82a240c14d4e21054840223ab5478238bc
| 9,096
|
py
|
Python
|
apps/blog/migrations/0001_initial.py
|
linuxiston/linuxiston
|
8e106c206ce3e9b4b80a12ab79d320ad0f4c2970
|
[
"MIT"
] | 3
|
2022-03-21T11:56:04.000Z
|
2022-03-21T12:13:13.000Z
|
apps/blog/migrations/0001_initial.py
|
linuxiston/linuxiston
|
8e106c206ce3e9b4b80a12ab79d320ad0f4c2970
|
[
"MIT"
] | 1
|
2022-03-24T10:55:31.000Z
|
2022-03-24T10:55:31.000Z
|
apps/blog/migrations/0001_initial.py
|
linuxiston/linuxiston
|
8e106c206ce3e9b4b80a12ab79d320ad0f4c2970
|
[
"MIT"
] | 2
|
2022-03-21T11:57:36.000Z
|
2022-03-21T12:13:14.000Z
|
# Generated by Django 4.0.3 on 2022-04-05 11:06
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Author",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("full_name", models.CharField(max_length=50)),
("avatar", models.ImageField(upload_to="author-avatars")),
("bio", models.CharField(max_length=300)),
("telegram", models.URLField()),
("instagram", models.URLField()),
("youtube", models.URLField()),
("github", models.URLField()),
],
),
migrations.CreateModel(
name="Category",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("category", models.CharField(max_length=30)),
],
options={
"verbose_name": "category",
"verbose_name_plural": "Categories",
},
),
migrations.CreateModel(
name="Faq",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=500)),
("description", models.CharField(max_length=1000)),
("created", models.DateTimeField(auto_now_add=True)),
("active", models.BooleanField(default=True)),
],
options={
"verbose_name": "Faq",
"verbose_name_plural": "FAQ",
"ordering": ("-created",),
},
),
migrations.CreateModel(
name="Tag",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("tag", models.CharField(max_length=20)),
],
options={
"verbose_name": "tag",
"verbose_name_plural": "Tags",
},
),
migrations.CreateModel(
name="VideoPost",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=200)),
("video_url", models.URLField()),
("description", models.CharField(max_length=300)),
("body", ckeditor.fields.RichTextField()),
("created", models.DateTimeField(auto_now_add=True)),
("thumbnail", models.ImageField(upload_to="post-thumbnails")),
("slug", models.SlugField()),
(
"author",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="blog.author"
),
),
(
"category",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to="blog.category"
),
),
(
"likes",
models.ManyToManyField(
blank=True, related_name="vlikes", to=settings.AUTH_USER_MODEL
),
),
("tags", models.ManyToManyField(to="blog.tag")),
],
options={
"verbose_name": "Video post",
"verbose_name_plural": "Video blog posts",
"ordering": ("-created",),
},
),
migrations.CreateModel(
name="VideoComment",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("comment", models.CharField(max_length=500)),
("created", models.DateTimeField(auto_now_add=True)),
(
"author",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"post",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="video_comments",
to="blog.videopost",
),
),
],
options={
"verbose_name": "video comment",
"verbose_name_plural": "Video Comments",
"ordering": ("-created",),
},
),
migrations.CreateModel(
name="Post",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=200)),
("description", models.CharField(max_length=300)),
("body", ckeditor.fields.RichTextField()),
("created", models.DateTimeField(auto_now_add=True)),
("thumbnail", models.ImageField(upload_to="post-thumbnails")),
("slug", models.SlugField()),
(
"author",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="blog.author"
),
),
(
"category",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to="blog.category"
),
),
(
"likes",
models.ManyToManyField(
blank=True, related_name="likes", to=settings.AUTH_USER_MODEL
),
),
("tags", models.ManyToManyField(to="blog.tag")),
],
options={
"verbose_name": "post",
"verbose_name_plural": "Blog posts",
"ordering": ("-created",),
},
),
migrations.CreateModel(
name="Comment",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("comment", models.CharField(max_length=500)),
("created", models.DateTimeField(auto_now_add=True)),
(
"author",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"post",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comments",
to="blog.post",
),
),
],
options={
"verbose_name": "comment",
"verbose_name_plural": "Comments",
"ordering": ("-created",),
},
),
]
| 34.717557
| 87
| 0.388303
|
4a0650c4b3130261fdf97dbf46d70b22cb378a66
| 434
|
py
|
Python
|
telegram_gcloner/utils/callback.py
|
fasih4444/clonebot
|
b2750a6d53e1446ed43663543e87a3c32c47a918
|
[
"MIT"
] | 18
|
2021-09-12T14:56:26.000Z
|
2022-02-19T09:33:36.000Z
|
telegram_gcloner/utils/callback.py
|
fasih4444/clonebot
|
b2750a6d53e1446ed43663543e87a3c32c47a918
|
[
"MIT"
] | 1
|
2022-01-29T08:38:43.000Z
|
2022-01-29T08:38:43.000Z
|
telegram_gcloner/utils/callback.py
|
fasih4444/clonebot
|
b2750a6d53e1446ed43663543e87a3c32c47a918
|
[
"MIT"
] | 60
|
2021-09-11T04:23:21.000Z
|
2022-03-30T07:24:57.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
from telegram.ext import CallbackContext
logger = logging.getLogger(__name__)
def callback_delete_message(context: CallbackContext):
    """Job-queue callback that deletes a single Telegram message.

    ``context.job.context`` must be a ``(chat_id, message_id)`` pair.
    Deletion failures (message already gone, insufficient rights, ...) are
    logged as warnings instead of being propagated.
    """
    chat_id, message_id = context.job.context
    try:
        context.bot.delete_message(chat_id=chat_id, message_id=message_id)
    except Exception as exc:
        logger.warning('Could not delete message {}: {}'.format(message_id, exc))
| 27.125
| 79
| 0.723502
|
4a065100566beba613214f311c84e1093158b0db
| 3,241
|
py
|
Python
|
data/unaligned_data_loader.py
|
fducau/pytorch-CycleGAN-and-pix2pix
|
63761c5d8abea169d8a03a73c0bae99aaf036b6a
|
[
"BSD-3-Clause"
] | 1
|
2021-02-19T02:40:35.000Z
|
2021-02-19T02:40:35.000Z
|
data/unaligned_data_loader.py
|
DeepModel/pytorch-CycleGAN-and-pix2pix
|
63761c5d8abea169d8a03a73c0bae99aaf036b6a
|
[
"BSD-3-Clause"
] | null | null | null |
data/unaligned_data_loader.py
|
DeepModel/pytorch-CycleGAN-and-pix2pix
|
63761c5d8abea169d8a03a73c0bae99aaf036b6a
|
[
"BSD-3-Clause"
] | 1
|
2021-02-24T16:37:29.000Z
|
2021-02-24T16:37:29.000Z
|
import torch.utils.data
import torchvision.transforms as transforms
from data.base_data_loader import BaseDataLoader
from data.image_folder import ImageFolder
from builtins import object
from pdb import set_trace as st
class PairedData(object):
    """Iterate two data loaders in lockstep, cycling the shorter one.

    Each step yields ``{'A': ..., 'A_paths': ..., 'B': ..., 'B_paths': ...}``.
    Iteration stops once *both* underlying loaders have been exhausted at
    least once, i.e. after ``max(len(A), len(B))`` steps; the exhaustion
    flags are reset so the pair can be iterated again.
    """

    def __init__(self, data_loader_A, data_loader_B):
        self.data_loader_A = data_loader_A
        self.data_loader_B = data_loader_B
        self.stop_A = False
        self.stop_B = False

    def __iter__(self):
        self.stop_A = False
        self.stop_B = False
        self.data_loader_A_iter = iter(self.data_loader_A)
        self.data_loader_B_iter = iter(self.data_loader_B)
        return self

    def _advance(self, loader, iter_attr, stop_attr):
        """Fetch the next (item, paths) pair from one side, restarting that
        side's iterator and flagging exhaustion on StopIteration."""
        try:
            return next(getattr(self, iter_attr))
        except StopIteration:
            setattr(self, stop_attr, True)
            fresh = iter(loader)
            setattr(self, iter_attr, fresh)
            # An empty loader raises StopIteration again here, ending the loop.
            return next(fresh)

    def __next__(self):
        A, A_paths = self._advance(self.data_loader_A, 'data_loader_A_iter', 'stop_A')
        B, B_paths = self._advance(self.data_loader_B, 'data_loader_B_iter', 'stop_B')
        if self.stop_A and self.stop_B:
            self.stop_A = False
            self.stop_B = False
            raise StopIteration()
        return {'A': A, 'A_paths': A_paths,
                'B': B, 'B_paths': B_paths}
class UnalignedDataLoader(BaseDataLoader):
    """Loads two unpaired image folders (``<dataroot>/<phase>A`` and ``...B``)
    and serves them as paired batches through :class:`PairedData`."""

    def initialize(self, opt):
        """Build both datasets and their DataLoaders from ``opt``.

        Expects ``opt`` to carry dataroot, phase, loadSize, fineSize,
        batchSize, serial_batches and nThreads.
        """
        BaseDataLoader.initialize(self, opt)
        # Same preprocessing for both domains: resize, center-crop,
        # then scale pixel values to [-1, 1].
        # NOTE(review): transforms.Scale is the pre-0.2 torchvision name of
        # transforms.Resize — presumably this targets an old torchvision; confirm.
        transform = transforms.Compose([
            transforms.Scale(opt.loadSize),
            transforms.CenterCrop(opt.fineSize),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5),
                                 (0.5, 0.5, 0.5))])

        # Dataset A
        dataset_A = ImageFolder(root=opt.dataroot + '/' + opt.phase + 'A',
                                transform=transform, return_paths=True)

        data_loader_A = torch.utils.data.DataLoader(
            dataset_A,
            batch_size=self.opt.batchSize,
            shuffle=not self.opt.serial_batches,
            num_workers=int(self.opt.nThreads))

        # Dataset B
        dataset_B = ImageFolder(root=opt.dataroot + '/' + opt.phase + 'B',
                                transform=transform, return_paths=True)

        data_loader_B = torch.utils.data.DataLoader(
            dataset_B,
            batch_size=self.opt.batchSize,
            shuffle=not self.opt.serial_batches,
            num_workers=int(self.opt.nThreads))
        self.dataset_A = dataset_A
        self.dataset_B = dataset_B
        # PairedData cycles the shorter loader so every epoch covers the
        # longer dataset exactly once.
        self.paired_data = PairedData(data_loader_A, data_loader_B)

    def name(self):
        return 'UnalignedDataLoader'

    def load_data(self):
        """Return the paired iterator built in :meth:`initialize`."""
        return self.paired_data

    def __len__(self):
        # Epoch length follows the larger of the two datasets.
        return max(len(self.dataset_A), len(self.dataset_B))
| 36.829545
| 78
| 0.576057
|
4a0651625baeb8ffb4e3b83c3850128918f40a52
| 53,368
|
py
|
Python
|
src/sentry/event_manager.py
|
QuincyMa/sentry
|
9b0401de06185a3371f9b51ce8f4732500830f50
|
[
"BSD-3-Clause"
] | 1
|
2019-01-18T08:58:11.000Z
|
2019-01-18T08:58:11.000Z
|
src/sentry/event_manager.py
|
QuincyMa/sentry
|
9b0401de06185a3371f9b51ce8f4732500830f50
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/event_manager.py
|
QuincyMa/sentry
|
9b0401de06185a3371f9b51ce8f4732500830f50
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import os
import six
import jsonschema
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connection, IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
from sentry import buffer, eventtypes, eventstream, features, tsdb, filters
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, LOG_LEVELS_MAP, DEFAULT_LOG_LEVEL,
DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH, VALID_PLATFORMS, MAX_TAG_VALUE_LENGTH,
CLIENT_IGNORED_ATTRS,
)
from sentry.coreapi import (
APIError,
APIForbidden,
decompress_gzip,
decompress_deflate,
decode_and_decompress_data,
decode_data,
safely_load_json_string,
)
from sentry.interfaces.base import get_interface, prune_empty_keys
from sentry.interfaces.exception import normalize_mechanism_meta
from sentry.interfaces.schemas import validate_and_default_interface
from sentry.lang.native.utils import get_sdk_from_event
from sentry.models import (
Activity, Environment, Event, EventError, EventMapping, EventUser, Group,
GroupEnvironment, GroupHash, GroupLink, GroupRelease, GroupResolution, GroupStatus,
Project, Release, ReleaseEnvironment, ReleaseProject,
ReleaseProjectEnvironment, UserReport
)
from sentry.plugins import plugins
from sentry.signals import event_discarded, event_saved, first_event_received
from sentry.tasks.integrations import kick_off_status_syncs
from sentry.utils import metrics
from sentry.utils.cache import default_cache
from sentry.utils.canonical import CanonicalKeyDict
from sentry.utils.data_filters import (
is_valid_ip,
is_valid_release,
is_valid_error_message,
FilterStatKeys,
)
from sentry.utils.dates import to_timestamp
from sentry.utils.db import is_postgres, is_mysql
from sentry.utils.meta import Meta
from sentry.utils.safe import ENABLE_TRIMMING, safe_execute, trim, trim_dict, get_path, set_path, setdefault_path
from sentry.utils.strings import truncatechars
from sentry.utils.geo import rust_geoip
from sentry.utils.validators import is_float
from sentry.utils.contexts_normalization import normalize_user_agent
from sentry.stacktraces import normalize_in_app
logger = logging.getLogger("sentry.events")
MAX_SECS_IN_FUTURE = 60
ALLOWED_FUTURE_DELTA = timedelta(seconds=MAX_SECS_IN_FUTURE)
MAX_SECS_IN_PAST = 2592000 # 30 days
SECURITY_REPORT_INTERFACES = (
"csp",
"hpkp",
"expectct",
"expectstaple",
)
ENABLE_RUST = os.environ.get("SENTRY_USE_RUST_NORMALIZER", "false").lower() in ("1", "true")
def set_tag(data, key, value):
    """Set ``key`` to ``value`` in the event's tag list.

    Any existing entries for the same key are removed, and the new pair is
    appended at the end of ``data['tags']``.
    """
    kept = [(tag_key, tag_value) for tag_key, tag_value in data['tags'] if tag_key != key]
    kept.append((key, value))
    data['tags'] = kept
def get_event_metadata_compat(data, fallback_message):
    """This is a fallback path to getting the event metadata. This is used
    by some code paths that could potentially deal with old sentry events that
    do not have metadata yet. This does not happen in practice any more but
    the testsuite was never adapted so the tests hit this code path constantly.
    """
    if 'metadata' in data:
        return data['metadata']
    # Legacy event: derive metadata from the event type handler.
    etype = data.get('type') or 'default'
    return eventtypes.get(etype)(data).get_metadata()
def count_limit(count):
    """Return the sampling divisor for an event seen ``count`` times,
    falling back to the configured maximum above the largest threshold."""
    # TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
    # ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
    return next(
        (rate for threshold, rate in settings.SENTRY_SAMPLE_RATES if count <= threshold),
        settings.SENTRY_MAX_SAMPLE_RATE,
    )
def time_limit(silence):  # ~ 3600 per hour
    """Return the sampling divisor for a group silent for ``silence`` time,
    falling back to the configured maximum below the smallest threshold."""
    return next(
        (rate for threshold, rate in settings.SENTRY_SAMPLE_TIMES if silence >= threshold),
        settings.SENTRY_MAX_SAMPLE_TIME,
    )
def parse_client_as_sdk(value):
    """Parse a legacy ``client`` string into an SDK info dict.

    Accepts "name/version" or "name version"; returns {} when the value is
    empty or cannot be split on either separator.
    """
    if not value:
        return {}
    for separator in ("/", " "):
        name, found, version = value.partition(separator)
        if found:
            return {"name": name, "version": version}
    return {}
# The sampling predicate is chosen once at import time: when sampling is
# disabled via settings, the no-op variant has zero per-event cost.
if not settings.SENTRY_SAMPLE_DATA:
    def should_sample(current_datetime, last_seen, times_seen):
        # Sampling disabled: store every event.
        return False
else:
    def should_sample(current_datetime, last_seen, times_seen):
        """Return True when this occurrence should be dropped (sampled out).

        An event is always kept (False) when its occurrence count or its
        silence period hits the configured count/time sampling boundaries.
        """
        silence = current_datetime - last_seen
        if times_seen % count_limit(times_seen) == 0:
            return False
        if times_seen % time_limit(silence) == 0:
            return False
        return True
def generate_culprit(data, platform=None):
    """Derive a culprit string from the event's stacktrace(s), falling back
    to the request URL, and truncate it to MAX_CULPRIT_LENGTH."""
    stacktraces = None
    exceptions = get_path(data, 'exception', 'values')
    if exceptions:
        # Only exception entries that actually carry frames contribute.
        stacktraces = [
            e['stacktrace'] for e in exceptions
            if get_path(e, 'stacktrace', 'frames')
        ]
    else:
        stacktrace = data.get('stacktrace')
        if stacktrace and stacktrace.get('frames'):
            stacktraces = [stacktrace]

    culprit = None
    if stacktraces:
        from sentry.interfaces.stacktrace import Stacktrace
        # The last stacktrace is the most relevant for naming the culprit.
        culprit = Stacktrace.to_python(stacktraces[-1]).get_culprit_string(
            platform=platform,
        )
    if not culprit and data.get('request'):
        culprit = get_path(data, 'request', 'url')
    return truncatechars(culprit or '', MAX_CULPRIT_LENGTH)
def plugin_is_regression(group, event):
    """Ask the project's plugins whether this event is a regression.

    The first plugin with a non-None opinion wins; without any opinion the
    event is treated as a regression.
    """
    for plugin in plugins.for_project(event.project):
        opinion = safe_execute(
            plugin.is_regression, group, event, version=1, _with_transaction=False
        )
        if opinion is not None:
            return opinion
    return True
def process_timestamp(value, meta, current_datetime=None):
    """Normalize an incoming event timestamp to a float epoch value.

    Accepts numeric epoch values, ISO-8601 strings (optional ``Z`` suffix)
    and ``datetime`` objects.  Invalid values, timestamps more than
    ``MAX_SECS_IN_FUTURE`` seconds ahead or more than ``MAX_SECS_IN_PAST``
    seconds behind ``current_datetime`` are rejected: the error is recorded
    on ``meta`` and ``None`` is returned.
    """
    original_value = value
    if value is None:
        return None
    if is_float(value):
        try:
            value = datetime.fromtimestamp(float(value))
        except Exception:
            meta.add_error(EventError.INVALID_DATA, original_value)
            return None
    elif isinstance(value, six.string_types):
        # all timestamps are in UTC, but the marker is optional
        if value.endswith('Z'):
            value = value[:-1]
        if '.' in value:
            # Python doesn't support long microsecond values
            # https://github.com/getsentry/sentry/issues/1610
            ts_bits = value.split('.', 1)
            value = '%s.%s' % (ts_bits[0], ts_bits[1][:2])
            fmt = '%Y-%m-%dT%H:%M:%S.%f'
        else:
            fmt = '%Y-%m-%dT%H:%M:%S'
        try:
            value = datetime.strptime(value, fmt)
        except Exception:
            meta.add_error(EventError.INVALID_DATA, original_value)
            return None
    elif not isinstance(value, datetime):
        meta.add_error(EventError.INVALID_DATA, original_value)
        return None
    if current_datetime is None:
        current_datetime = datetime.now()
    if value > current_datetime + ALLOWED_FUTURE_DELTA:
        meta.add_error(EventError.FUTURE_TIMESTAMP, original_value)
        return None
    # Use the shared module constant instead of a hard-coded 30-day delta so
    # this limit stays consistent with the Rust normalizer configuration
    # (2592000 seconds == 30 days, so behavior is unchanged).
    if value < current_datetime - timedelta(seconds=MAX_SECS_IN_PAST):
        meta.add_error(EventError.PAST_TIMESTAMP, original_value)
        return None
    # NOTE(review): strftime('%s') is non-portable and interprets the naive
    # datetime in local time; kept as-is because the rest of this module
    # round-trips timestamps the same way — confirm before changing.
    return float(value.strftime('%s'))
def sanitize_fingerprint(value):
    """Coerce a single fingerprint entry to text, or return None to drop it."""
    # Special case floating point values: Only permit floats that have an exact
    # integer representation in JSON to avoid rounding issues.
    if isinstance(value, float):
        if abs(value) < (1 << 53):
            return six.text_type(int(value))
        return None
    # Stringify known types
    if isinstance(value, six.string_types + six.integer_types):
        return six.text_type(value)
    # Silently skip all other values
    return None
def cast_fingerprint(value):
    """Sanitize a fingerprint list, dropping entries that cannot be used.

    Non-list inputs are returned unchanged so that schema validation can
    emit the type error itself.
    """
    if not isinstance(value, list):
        return value
    sanitized = (sanitize_fingerprint(entry) for entry in value)
    return [entry for entry in sanitized if entry is not None]
def has_pending_commit_resolution(group):
    """Return True if the group is linked to a resolving commit that has not
    yet been associated with any release (i.e. the resolution is pending
    deployment)."""
    return GroupLink.objects.filter(
        group_id=group.id,
        linked_type=GroupLink.LinkedType.commit,
        relationship=GroupLink.Relationship.resolves,
    ).extra(
        # Raw EXISTS subquery: true only while no ReleaseCommit row
        # references the resolving commit yet.
        where=[
            "NOT EXISTS(SELECT 1 FROM sentry_releasecommit where commit_id = sentry_grouplink.linked_id)"]
    ).exists()
class HashDiscarded(Exception):
    """Raised when an incoming event matches a discarded (tombstoned) group
    hash and must be dropped instead of stored."""
    pass
def scoreclause_sql(sc, connection):
    """Render the group score expression as backend-specific SQL.

    Returns a ``(sql, params)`` tuple.  On backends other than Postgres and
    MySQL the score is computed in Python and returned as a plain integer.
    """
    db = getattr(connection, 'alias', 'default')
    has_values = sc.last_seen is not None and sc.times_seen is not None
    if is_postgres(db):
        sql = (
            'log(times_seen + %d) * 600 + %d' % (sc.times_seen, to_timestamp(sc.last_seen))
            if has_values
            else 'log(times_seen) * 600 + last_seen::abstime::int'
        )
    elif is_mysql(db):
        sql = (
            'log(times_seen + %d) * 600 + %d' % (sc.times_seen, to_timestamp(sc.last_seen))
            if has_values
            else 'log(times_seen) * 600 + unix_timestamp(last_seen)'
        )
    else:
        # XXX: if we cant do it atomically let's do it the best we can
        sql = int(sc)
    return (sql, [])
try:
    from django.db.models import Func
except ImportError:
    # XXX(dramer): compatibility hack for Django 1.6
    class ScoreClause(object):
        # Pre-Func Django expression object: implements the legacy
        # prepare/evaluate query-expression protocol.
        def __init__(self, group=None, last_seen=None, times_seen=None, *args, **kwargs):
            self.group = group
            self.last_seen = last_seen
            self.times_seen = times_seen
            # times_seen is likely an F-object that needs the value extracted
            if hasattr(self.times_seen, 'children'):
                self.times_seen = self.times_seen.children[1]
            super(ScoreClause, self).__init__(*args, **kwargs)

        def __int__(self):
            # Calculate the score manually when coercing to an int.
            # This is used within create_or_update and friends
            return self.group.get_score() if self.group else 0

        def prepare_database_save(self, unused):
            return self

        def prepare(self, evaluator, query, allow_joins):
            return

        def evaluate(self, node, qn, connection):
            return scoreclause_sql(self, connection)
else:
    # XXX(dramer): compatibility hack for Django 1.8+
    class ScoreClause(Func):
        # Modern expression: subclasses Func and renders via as_sql().
        def __init__(self, group=None, last_seen=None, times_seen=None, *args, **kwargs):
            self.group = group
            self.last_seen = last_seen
            self.times_seen = times_seen
            # times_seen is likely an F-object that needs the value extracted
            if hasattr(self.times_seen, 'rhs'):
                self.times_seen = self.times_seen.rhs.value
            super(ScoreClause, self).__init__(*args, **kwargs)

        def __int__(self):
            # Calculate the score manually when coercing to an int.
            # This is used within create_or_update and friends
            return self.group.get_score() if self.group else 0

        def as_sql(self, compiler, connection, function=None, template=None):
            return scoreclause_sql(self, connection)
def add_meta_errors(errors, meta):
    """Flatten normalization errors recorded in ``meta`` into ``errors``.

    Each emitted error dict carries its ``type``, the field path as ``name``
    (when known) and — on the first error per field only — the offending
    original ``value``.
    """
    for field_meta in meta:
        raw_value = field_meta.get().get('val')
        first = True
        for err_type, err_data in field_meta.iter_errors():
            entry = dict(err_data)
            entry['type'] = err_type
            if field_meta.path:
                entry['name'] = field_meta.path
            if first and raw_value is not None:
                entry['value'] = raw_value
            first = False
            errors.append(entry)
def _decode_event(data, content_encoding):
    """Decode a raw event payload into a ``CanonicalKeyDict``.

    Handles gzip/deflate transport encodings, base64(+zlib) packed payloads
    and plain JSON sent as bytes or text.
    """
    if isinstance(data, six.binary_type):
        if content_encoding == 'gzip':
            data = decompress_gzip(data)
        elif content_encoding == 'deflate':
            data = decompress_deflate(data)
        elif data[:1] != b'{':
            # Payload does not look like raw JSON: assume base64-encoded and
            # possibly zlib-compressed.  NOTE: slicing (``data[:1]``) instead
            # of indexing keeps this correct on Python 3, where ``data[0]``
            # is an int and would never compare equal to ``b'{'``.
            data = decode_and_decompress_data(data)
        else:
            data = decode_data(data)
    if isinstance(data, six.text_type):
        data = safely_load_json_string(data)
    return CanonicalKeyDict(data)
class EventManager(object):
"""
Handles normalization in both the store endpoint and the save task. The
intention is to swap this class out with a reimplementation in Rust.
"""
    def __init__(
        self,
        data,
        version='5',
        project=None,
        client_ip=None,
        user_agent=None,
        auth=None,
        key=None,
        content_encoding=None,
        for_store=True,
    ):
        """Wrap a raw event payload for normalization and saving.

        :param data: raw payload (bytes, text or mapping); decoded
            immediately into a ``CanonicalKeyDict``.
        :param version: protocol version reported by the client.
        :param project: ``Project`` the event belongs to, if known.
        :param client_ip: remote address of the submitting client; used to
            fill ``{{auto}}`` IP placeholders during normalization.
        :param user_agent: raw User-Agent header of the submission.
        :param auth: auth object carrying ``client``/``is_public`` info.
        :param key: the project key used for submission, if any.
        :param content_encoding: transport encoding (``'gzip'``,
            ``'deflate'`` or ``None``).
        :param for_store: when True, :meth:`normalize` also applies
            store-only defaulting and coercion.
        """
        self._data = _decode_event(data, content_encoding=content_encoding)
        self.version = version
        self._project = project
        self._client_ip = client_ip
        self._user_agent = user_agent
        self._auth = auth
        self._key = key
        self._for_store = for_store
        # Guard flag: normalize() may only run once per manager instance.
        self._normalized = False
    def process_csp_report(self):
        """Only called from the CSP report endpoint.

        Converts a browser security report payload (popped ``interface`` and
        ``report`` keys) into a regular event payload in place, raising
        APIForbidden/APIError on missing or invalid report data.
        """
        data = self._data
        try:
            interface = get_interface(data.pop('interface'))
            report = data.pop('report')
        except KeyError:
            raise APIForbidden('No report or interface data')
        # To support testing, we can either accept a built interface instance, or the raw data in
        # which case we build the instance ourselves
        try:
            instance = (
                report if isinstance(report, interface) else interface.from_raw(report)
            )
        except jsonschema.ValidationError as e:
            # Surface only the first line of the validation error to the client.
            raise APIError('Invalid security report: %s' % str(e).splitlines()[0])

        def clean(d):
            # Drop falsy values (e.g. missing User-Agent/Referer headers).
            return dict(filter(lambda x: x[1], d.items()))

        data.update(
            {
                'logger': 'csp',
                'message': instance.get_message(),
                'culprit': instance.get_culprit(),
                instance.path: instance.to_json(),
                'tags': instance.get_tags(),
                'errors': [],
                'user': {'ip_address': self._client_ip},
                # Construct a faux Http interface based on the little information we have
                # This is a bit weird, since we don't have nearly enough
                # information to create an Http interface, but
                # this automatically will pick up tags for the User-Agent
                # which is actually important here for CSP
                'request': {
                    'url': instance.get_origin(),
                    'headers': clean(
                        {
                            'User-Agent': self._user_agent,
                            'Referer': instance.get_referrer(),
                        }
                    ),
                },
            }
        )
        self._data = data
    def normalize(self):
        """Normalize the raw payload in place (may only run once).

        Either delegates to the Rust (semaphore) normalizer when ENABLE_RUST
        is set, or runs the Python pipeline: type coercion, timestamp
        parsing, ``{{auto}}`` IP substitution, schema validation, interface
        normalization and — when ``for_store`` — defaulting, trimming and
        meta-error collection.
        """
        if self._normalized:
            raise RuntimeError('Already normalized')
        self._normalized = True
        if ENABLE_RUST:
            from semaphore.processing import StoreNormalizer
            rust_normalizer = StoreNormalizer(
                geoip_lookup=rust_geoip,
                project_id=self._project.id if self._project else None,
                client_ip=self._client_ip,
                client=self._auth.client if self._auth else None,
                is_public_auth=self._auth.is_public if self._auth else False,
                key_id=six.text_type(self._key.id) if self._key else None,
                protocol_version=six.text_type(self.version) if self.version is not None else None,
                stacktrace_frames_hard_limit=settings.SENTRY_STACKTRACE_FRAMES_HARD_LIMIT,
                valid_platforms=list(VALID_PLATFORMS),
                max_secs_in_future=MAX_SECS_IN_FUTURE,
                max_secs_in_past=MAX_SECS_IN_PAST,
                enable_trimming=ENABLE_TRIMMING,
            )
            self._data = CanonicalKeyDict(rust_normalizer.normalize_event(dict(self._data)))
            # The Rust normalizer does not parse the User-Agent; do it here.
            normalize_user_agent(self._data)
            return
        data = self._data
        # Before validating with a schema, attempt to cast values to their desired types
        # so that the schema doesn't have to take every type variation into account.
        text = six.text_type

        def to_values(v):
            # Wrap bare lists/tuples as {'values': [...]} container dicts.
            return {'values': v} if v and isinstance(v, (tuple, list)) else v
        casts = {
            'environment': lambda v: text(v) if v is not None else v,
            'event_id': lambda v: v.lower(),
            'fingerprint': cast_fingerprint,
            'release': lambda v: text(v) if v is not None else v,
            'dist': lambda v: text(v).strip() if v is not None else v,
            'time_spent': lambda v: int(v) if v is not None else v,
            'tags': lambda v: [(text(v_k).replace(' ', '-').strip(), text(v_v).strip()) for (v_k, v_v) in dict(v).items()],
            'platform': lambda v: v if v in VALID_PLATFORMS else 'other',
            'logentry': lambda v: {'message': v} if (v and not isinstance(v, dict)) else (v or None),
            # These can be sent as lists and need to be converted to {'values': [...]}
            'exception': to_values,
            'breadcrumbs': to_values,
            'threads': to_values,
        }
        # Failed casts are recorded as INVALID_DATA on the field's meta and
        # the value is dropped from the payload.
        meta = Meta(data.get('_meta'))
        for c in casts:
            value = data.pop(c, None)
            if value is not None:
                try:
                    data[c] = casts[c](value)
                except Exception as e:
                    meta.enter(c).add_error(EventError.INVALID_DATA, value, {
                        'reason': six.text_type(e),
                    })
        data['timestamp'] = process_timestamp(data.get('timestamp'),
                                              meta.enter('timestamp'))
        # Fill in ip addresses marked as {{auto}}
        if self._client_ip:
            if get_path(data, 'request', 'env', 'REMOTE_ADDR') == '{{auto}}':
                data['request']['env']['REMOTE_ADDR'] = self._client_ip
            if get_path(data, 'user', 'ip_address') == '{{auto}}':
                data['user']['ip_address'] = self._client_ip
        # Validate main event body and tags against schema.
        # XXX(ja): jsonschema does not like CanonicalKeyDict, so we need to pass
        # in the inner data dict.
        validate_and_default_interface(data.data, 'event', meta=meta)
        if data.get('tags') is not None:
            validate_and_default_interface(
                data['tags'], 'tags', name='tags', meta=meta.enter('tags'))
        # Validate interfaces
        for k in list(iter(data)):
            if k in CLIENT_RESERVED_ATTRS:
                continue
            value = data.pop(k)
            # Ignore all top-level None and empty values, regardless whether
            # they are interfaces or not. For all other unrecognized attributes,
            # we emit an explicit error, unless they are explicitly ignored.
            if not value or k in CLIENT_IGNORED_ATTRS:
                continue
            try:
                interface = get_interface(k)
            except ValueError:
                logger.debug('Ignored unknown attribute: %s', k)
                meta.enter(k).add_error(EventError.INVALID_ATTRIBUTE)
                continue
            normalized = interface.normalize(value, meta.enter(k))
            if normalized:
                # Re-key under the interface's canonical path (e.g. alias
                # names map to their full path).
                data[interface.path] = normalized
        # Additional data coercion and defaulting we only do for store.
        if self._for_store:
            if self._project is not None:
                data['project'] = self._project.id
            if self._key is not None:
                data['key_id'] = self._key.id
            if self._auth is not None:
                data['sdk'] = data.get('sdk') or parse_client_as_sdk(self._auth.client)
            # Coerce numeric/unknown levels to a valid level name.
            level = data.get('level') or DEFAULT_LOG_LEVEL
            if isinstance(level, int) or (isinstance(level, six.string_types) and level.isdigit()):
                level = LOG_LEVELS.get(int(level), DEFAULT_LOG_LEVEL)
            if level not in LOG_LEVELS_MAP:
                level = DEFAULT_LOG_LEVEL
            data['level'] = level
            # A dist without a release is meaningless.
            if data.get('dist') and not data.get('release'):
                data['dist'] = None
            timestamp = data.get('timestamp')
            if not timestamp:
                timestamp = timezone.now()
            # TODO (alex) can this all be replaced by utcnow?
            # it looks like the only time that this would even be hit is when timestamp
            # is not defined, as the earlier process_timestamp already converts existing
            # timestamps to floats.
            if isinstance(timestamp, datetime):
                # We must convert date to local time so Django doesn't mess it up
                # based on TIME_ZONE
                if settings.TIME_ZONE:
                    if not timezone.is_aware(timestamp):
                        timestamp = timestamp.replace(tzinfo=timezone.utc)
                elif timezone.is_aware(timestamp):
                    timestamp = timestamp.replace(tzinfo=None)
                timestamp = float(timestamp.strftime('%s'))
            data['timestamp'] = timestamp
            data['received'] = float(timezone.now().strftime('%s'))
            setdefault_path(data, 'extra', value={})
            setdefault_path(data, 'logger', value=DEFAULT_LOGGER_NAME)
            setdefault_path(data, 'tags', value=[])
            # Fix case where legacy apps pass 'environment' as a tag
            # instead of a top level key.
            # TODO (alex) save() just reinserts the environment into the tags
            if not data.get('environment'):
                tagsdict = dict(data['tags'])
                if 'environment' in tagsdict:
                    data['environment'] = tagsdict['environment']
                    del tagsdict['environment']
                    data['tags'] = tagsdict.items()
            # the SDKs currently do not describe event types, and we must infer
            # them from available attributes
            data['type'] = eventtypes.infer(data).key
            data['version'] = self.version
        # Legacy: a top-level stacktrace next to a single exception belongs
        # to that exception; move it (and its meta) there.
        exceptions = get_path(data, 'exception', 'values', filter=True)
        stacktrace = data.get('stacktrace')
        if stacktrace and exceptions and len(exceptions) == 1:
            exceptions[0]['stacktrace'] = stacktrace
            stacktrace_meta = meta.enter('stacktrace')
            meta.enter('exception', 'values', 0, 'stacktrace').merge(stacktrace_meta)
            del data['stacktrace']
            # TODO(ja): Remove meta data of data['stacktrace'] here, too
        # Exception mechanism needs SDK information to resolve proper names in
        # exception meta (such as signal names). "SDK Information" really means
        # the operating system version the event was generated on. Some
        # normalization still works without sdk_info, such as mach_exception
        # names (they can only occur on macOS).
        if exceptions:
            sdk_info = get_sdk_from_event(data)
            for ex in exceptions:
                if 'mechanism' in ex:
                    normalize_mechanism_meta(ex['mechanism'], sdk_info)
        # This function parses the User Agent from the request if present and fills
        # contexts with it.
        normalize_user_agent(data)
        if not get_path(data, "user", "ip_address"):
            # If there is no User ip_address, update it either from the Http
            # interface or the client_ip of the request.
            is_public = self._auth and self._auth.is_public
            add_ip_platforms = ('javascript', 'cocoa', 'objc')
            http_ip = get_path(data, 'request', 'env', 'REMOTE_ADDR')
            if http_ip:
                set_path(data, 'user', 'ip_address', value=http_ip)
            elif self._client_ip and (is_public or data.get('platform') in add_ip_platforms):
                set_path(data, 'user', 'ip_address', value=self._client_ip)
        # Trim values
        if data.get('logger'):
            data['logger'] = trim(data['logger'].strip(), 64)
        if data.get('extra'):
            trim_dict(data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
        if data.get('culprit'):
            data['culprit'] = trim(data['culprit'], MAX_CULPRIT_LENGTH)
        if data.get('transaction'):
            # XXX: This will be trimmed again when inserted into tag values
            data['transaction'] = trim(data['transaction'], MAX_CULPRIT_LENGTH)
        # Move some legacy data into tags
        site = data.pop('site', None)
        if site is not None:
            set_tag(data, 'site', site)
        server_name = data.pop('server_name', None)
        if server_name is not None:
            set_tag(data, 'server_name', server_name)
        # Drop empty containers entirely.
        for key in ('fingerprint', 'modules', 'tags', 'extra', 'contexts'):
            if not data.get(key):
                data.pop(key, None)
        # Merge meta errors into the errors array. We need to iterate over the
        # raw meta instead of data due to pruned null values.
        errors = data.get('errors') or []
        add_meta_errors(errors, meta)
        add_meta_errors(errors, meta.enter('tags'))
        if errors:
            data['errors'] = errors
        elif 'errors' in data:
            del data['errors']
        if meta.raw():
            data['_meta'] = meta.raw()
        elif '_meta' in data:
            del data['_meta']
        self._data = CanonicalKeyDict(prune_empty_keys(data))
    def should_filter(self):
        '''
        returns (result: bool, reason: string or None)

        Result is True if an event should be filtered
        The reason for filtering is passed along as a string
        so that we can store it in metrics
        '''
        # Security reports get their own dedicated filter check first.
        for name in SECURITY_REPORT_INTERFACES:
            if name in self._data:
                interface = get_interface(name)
                if interface.to_python(self._data[name]).should_filter(self._project):
                    return (True, FilterStatKeys.INVALID_CSP)
        if self._client_ip and not is_valid_ip(self._project, self._client_ip):
            return (True, FilterStatKeys.IP_ADDRESS)
        release = self._data.get('release')
        if release and not is_valid_release(self._project, release):
            return (True, FilterStatKeys.RELEASE_VERSION)
        # Check the formatted log message (or raw message) against the
        # project's error-message filters.
        error_message = get_path(self._data, 'logentry', 'formatted') \
            or get_path(self._data, 'logentry', 'message') \
            or ''
        if error_message and not is_valid_error_message(self._project, error_message):
            return (True, FilterStatKeys.ERROR_MESSAGE)
        # Also test each exception as "Type: value".
        for exc in get_path(self._data, 'exception', 'values', filter=True, default=[]):
            message = u': '.join(
                filter(None, map(exc.get, ['type', 'value']))
            )
            if message and not is_valid_error_message(self._project, message):
                return (True, FilterStatKeys.ERROR_MESSAGE)
        # Finally run any registered inbound data filters.
        for filter_cls in filters.all():
            filter_obj = filter_cls(self._project)
            if filter_obj.is_enabled() and filter_obj.test(self._data):
                return (True, six.text_type(filter_obj.id))
        return (False, None)
    def get_data(self):
        """Return the current (possibly normalized) event payload."""
        return self._data
    def _get_event_instance(self, project_id=None):
        """Build an (unsaved) ``Event`` model instance from the payload.

        Also computes the nodestore ``node_id`` and stores it on the data.
        """
        data = self._data
        event_id = data.get('event_id')
        platform = data.get('platform')
        recorded_timestamp = data.get('timestamp')
        # NOTE(review): fromtimestamp() interprets the epoch value in local
        # time before tzinfo is overwritten with UTC.  This mirrors the
        # strftime('%s') round-trip used elsewhere in this module, so the two
        # cancel out on UTC servers — confirm before changing either side.
        date = datetime.fromtimestamp(recorded_timestamp)
        date = date.replace(tzinfo=timezone.utc)
        time_spent = data.get('time_spent')
        data['node_id'] = Event.generate_node_id(project_id, event_id)
        return Event(
            project_id=project_id or self._project.id,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            platform=platform
        )
def get_culprit(self):
"""Helper to calculate the default culprit"""
return force_text(
self._data.get('culprit') or
self._data.get('transaction') or
generate_culprit(self._data, platform=self._data['platform']) or
''
)
    def get_event_type(self):
        """Returns the event type instance ('default' when unspecified)."""
        return eventtypes.get(self._data.get('type', 'default'))(self._data)
def get_search_message(self, event_metadata=None, culprit=None):
"""This generates the internal event.message attribute which is used
for search purposes. It adds a bunch of data from the metadata and
the culprit.
"""
if event_metadata is None:
event_metadata = self.get_event_type().get_metadata()
if culprit is None:
culprit = self.get_culprit()
data = self._data
message = ''
if data.get('logentry'):
message += (data['logentry'].get('formatted') or
data['logentry'].get('message') or '')
if event_metadata:
for value in six.itervalues(event_metadata):
value_u = force_text(value, errors='replace')
if value_u not in message:
message = u'{} {}'.format(message, value_u)
if culprit and culprit not in message:
culprit_u = force_text(culprit, errors='replace')
message = u'{} {}'.format(message, culprit_u)
return trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)
    def save(self, project_id, raw=False, assume_normalized=False):
        """Persist the event: resolve its group, releases, environments,
        tags and counters, then write it out and publish to the event stream.

        Returns the saved (or previously existing duplicate) ``Event``.
        Raises ``HashDiscarded`` when the event matches a tombstoned group.
        """
        # Normalize if needed
        if not self._normalized:
            if not assume_normalized:
                self.normalize()
            self._normalized = True
        from sentry.tasks.post_process import index_event_tags
        data = self._data
        project = Project.objects.get_from_cache(id=project_id)
        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(
                project_id=project.id,
                event_id=data['event_id'],
            )
        except Event.DoesNotExist:
            pass
        else:
            logger.info(
                'duplicate.found',
                exc_info=True,
                extra={
                    'event_uuid': data['event_id'],
                    'project_id': project.id,
                    'model': Event.__name__,
                }
            )
            return event
        # Pull out the culprit
        culprit = self.get_culprit()
        # Pull the toplevel data we're interested in
        level = data.get('level')
        # TODO(mitsuhiko): this code path should be gone by July 2018.
        # This is going to be fine because no code actually still depends
        # on integers here. When we need an integer it will be converted
        # into one later. Old workers used to send integers here.
        if level is not None and isinstance(level, six.integer_types):
            level = LOG_LEVELS[level]
        transaction_name = data.get('transaction')
        logger_name = data.get('logger')
        fingerprint = data.get('fingerprint') or ['{{ default }}']
        release = data.get('release')
        dist = data.get('dist')
        environment = data.get('environment')
        recorded_timestamp = data.get('timestamp')
        # We need to swap out the data with the one internal to the newly
        # created event object
        event = self._get_event_instance(project_id=project_id)
        self._data = data = event.data.data
        event._project_cache = project
        date = event.datetime
        platform = event.platform
        event_id = event.event_id
        if transaction_name:
            transaction_name = force_text(transaction_name)
        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction). These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        tags = dict(data.get('tags') or [])
        tags['level'] = level
        if logger_name:
            tags['logger'] = logger_name
        if environment:
            tags['environment'] = trim(environment, MAX_TAG_VALUE_LENGTH)
        if transaction_name:
            tags['transaction'] = trim(transaction_name, MAX_TAG_VALUE_LENGTH)
        if release:
            # dont allow a conflicting 'release' tag
            if 'release' in tags:
                del tags['release']
            # Resolve the version string to a Release model instance.
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )
            tags['sentry:release'] = release.version
        if dist and release:
            dist = release.add_dist(dist, date)
            # dont allow a conflicting 'dist' tag
            if 'dist' in tags:
                del tags['dist']
            tags['sentry:dist'] = dist.name
        else:
            dist = None
        event_user = self._get_event_user(project, data)
        if event_user:
            # dont allow a conflicting 'user' tag
            if 'user' in tags:
                del tags['user']
            tags['sentry:user'] = event_user.tag_value
        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        normalize_in_app(data)
        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    tags.setdefault(key, value)
        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                tags[k] = v
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.path, None)
        # tags are stored as a tuple
        tags = tags.items()
        # Put the actual tags and fingerprint back
        data['tags'] = tags
        data['fingerprint'] = fingerprint
        hashes = event.get_hashes()
        event_type = self.get_event_type()
        event_metadata = event_type.get_metadata()
        data['type'] = event_type.key
        data['metadata'] = event_metadata
        data['hashes'] = hashes
        # index components into ``Event.message``
        # See GH-3248
        event.message = self.get_search_message(event_metadata, culprit)
        received_timestamp = event.data.get('received') or float(event.datetime.strftime('%s'))
        kwargs = {
            'platform': platform,
            'message': event.message,
            'culprit': culprit,
            'logger': logger_name,
            'level': LOG_LEVELS_MAP.get(level),
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': {
                'last_received': received_timestamp,
                'type': event_type.key,
                # we cache the events metadata on the group to ensure its
                # accessible in the stream
                'metadata': event_metadata,
            },
        }
        if release:
            kwargs['first_release'] = release
        try:
            group, is_new, is_regression, is_sample = self._save_aggregate(
                event=event, hashes=hashes, release=release, **kwargs
            )
        except HashDiscarded:
            # Event matched a tombstoned group: count it and re-raise.
            event_discarded.send_robust(
                project=project,
                sender=EventManager,
            )
            metrics.incr(
                'events.discarded',
                skip_internal=True,
                tags={
                    'organization_id': project.organization_id,
                    'platform': platform,
                },
            )
            raise
        else:
            event_saved.send_robust(
                project=project,
                event_size=event.size,
                sender=EventManager,
            )
        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)
        # When an event was sampled, the canonical source of truth
        # is the EventMapping table since we aren't going to be writing out an actual
        # Event row. Otherwise, if the Event isn't being sampled, we can safely
        # rely on the Event table itself as the source of truth and ignore
        # EventMapping since it's redundant information.
        if is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(EventMapping)):
                    EventMapping.objects.create(project=project, group=group, event_id=event_id)
            except IntegrityError:
                logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': EventMapping.__name__,
                    }
                )
                return event
        # From here on, 'environment' is the model instance, not the name.
        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )
        group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
            group_id=group.id,
            environment_id=environment.id,
            defaults={
                'first_release_id': release.id if release else None,
            },
        )
        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )
            ReleaseProjectEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )
            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )
        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]
        if release:
            counters.append((tsdb.models.release, release.id))
        tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)
        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]
        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1,
                    },
                })
            )
        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)
        # Attach any user feedback submitted before the event arrived.
        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(
            group=group,
            environment=environment,
        )
        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': Event.__name__,
                    }
                )
                return event
            index_event_tags.delay(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=tags,
                date_added=event.datetime,
            )
        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
                environment_id=environment.id,
            )
        if release:
            if is_new:
                buffer.incr(
                    ReleaseProject, {'new_groups': 1}, {
                        'release_id': release.id,
                        'project_id': project.id,
                    }
                )
            if is_new_group_environment:
                buffer.incr(
                    ReleaseProjectEnvironment, {'new_issues_count': 1}, {
                        'project_id': project.id,
                        'release_id': release.id,
                        'environment_id': environment.id,
                    }
                )
        safe_execute(Group.objects.add_tags, group, environment, tags, _with_transaction=False)
        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project, group=group, sender=Project)
        eventstream.insert(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )
        metrics.timing(
            'events.latency',
            received_timestamp - recorded_timestamp,
            tags={
                'project_id': project.id,
            },
        )
        return event
    def _get_event_user(self, project, data):
        """Resolve (and persist if needed) the ``EventUser`` for this event.

        Returns None when the payload has no user data or the data is not
        identifying enough to compute a hash.  On a cache hit the freshly
        built (unsaved) instance is returned as-is.
        """
        user_data = data.get('user')
        if not user_data:
            return
        euser = EventUser(
            project_id=project.id,
            ident=user_data.get('id'),
            email=user_data.get('email'),
            username=user_data.get('username'),
            ip_address=user_data.get('ip_address'),
            name=user_data.get('name'),
        )
        euser.set_hash()
        if not euser.hash:
            return
        cache_key = u'euserid:1:{}:{}'.format(
            project.id,
            euser.hash,
        )
        euser_id = default_cache.get(cache_key)
        if euser_id is None:
            try:
                with transaction.atomic(using=router.db_for_write(EventUser)):
                    euser.save()
            except IntegrityError:
                # Row already exists (raced another worker); load it and
                # refresh the name if the payload carries a newer one.
                try:
                    euser = EventUser.objects.get(
                        project_id=project.id,
                        hash=euser.hash,
                    )
                except EventUser.DoesNotExist:
                    # why???
                    e_userid = -1
                else:
                    if euser.name != (user_data.get('name') or euser.name):
                        euser.update(
                            name=user_data['name'],
                        )
                    e_userid = euser.id
                default_cache.set(cache_key, e_userid, 3600)
        return euser
def _find_hashes(self, project, hash_list):
return map(
lambda hash: GroupHash.objects.get_or_create(
project=project,
hash=hash,
)[0],
hash_list,
)
    def _save_aggregate(self, event, hashes, release, **kwargs):
        """Find or create the ``Group`` for the event's hashes.

        Returns ``(group, is_new, is_regression, is_sample)``.  Raises
        ``HashDiscarded`` when a hash matches a group tombstone.
        """
        project = event.project
        # attempt to find a matching hash
        all_hashes = self._find_hashes(project, hashes)
        existing_group_id = None
        for h in all_hashes:
            if h.group_id is not None:
                existing_group_id = h.group_id
                break
            if h.group_tombstone_id is not None:
                raise HashDiscarded('Matches group tombstone %s' % h.group_tombstone_id)
        # XXX(dcramer): this has the opportunity to create duplicate groups
        # it should be resolved by the hash merging function later but this
        # should be better tested/reviewed
        if existing_group_id is None:
            # it's possible the release was deleted between
            # when we queried for the release and now, so
            # make sure it still exists
            first_release = kwargs.pop('first_release', None)
            with transaction.atomic():
                short_id = project.next_short_id()
                group, group_is_new = Group.objects.create(
                    project=project,
                    short_id=short_id,
                    first_release_id=Release.objects.filter(
                        id=first_release.id,
                    ).values_list('id', flat=True).first() if first_release else None,
                    **kwargs
                ), True
            metrics.incr(
                'group.created',
                skip_internal=True,
                tags={'platform': event.platform or 'unknown'}
            )
        else:
            group = Group.objects.get(id=existing_group_id)
            group_is_new = False
        # If all hashes are brand new we treat this event as new
        is_new = False
        new_hashes = [h for h in all_hashes if h.group_id is None]
        if new_hashes:
            # XXX: There is a race condition here wherein another process could
            # create a new group that is associated with one of the new hashes,
            # add some event(s) to it, and then subsequently have the hash
            # "stolen" by this process. This then "orphans" those events from
            # their "siblings" in the group we've created here. We don't have a
            # way to fix this, since we can't update the group on those hashes
            # without filtering on `group_id` (which we can't do due to query
            # planner weirdness.) For more context, see 84c6f75a and d0e22787,
            # as well as GH-5085.
            GroupHash.objects.filter(
                id__in=[h.id for h in new_hashes],
            ).exclude(
                state=GroupHash.State.LOCKED_IN_MIGRATION,
            ).update(group=group)
            if group_is_new and len(new_hashes) == len(all_hashes):
                is_new = True
        # XXX(dcramer): it's important this gets called **before** the aggregate
        # is processed as otherwise values like last_seen will get mutated
        can_sample = (
            features.has('projects:sample-events', project=project) and should_sample(
                event.data.get('received') or float(event.datetime.strftime('%s')),
                group.data.get('last_received') or float(group.last_seen.strftime('%s')),
                group.times_seen,
            )
        )
        if not is_new:
            is_regression = self._process_existing_aggregate(
                group=group,
                event=event,
                data=kwargs,
                release=release,
            )
        else:
            is_regression = False
        # Determine if we've sampled enough data to store this event
        if is_new or is_regression:
            is_sample = False
        else:
            is_sample = can_sample
        if not is_sample:
            GroupHash.record_last_processed_event_id(
                all_hashes[0].id,
                event.event_id,
            )
        return group, is_new, is_regression, is_sample
def _handle_regression(self, group, event, release):
    """Transition a resolved ``group`` back to UNRESOLVED if ``event``
    indicates it regressed.

    Returns True if a regression was recorded, or None when the group is
    not resolved / a resolution or pending commit resolution blocks the
    transition / the plugin vetoes it.
    """
    if not group.is_resolved():
        return

    # we only mark it as a regression if the event's release is newer than
    # the release which we originally marked this as resolved
    elif GroupResolution.has_resolution(group, release):
        return

    elif has_pending_commit_resolution(group):
        return

    if not plugin_is_regression(group, event):
        return

    # we now think its a regression, rely on the database to validate that
    # no one beat us to this
    date = max(event.datetime, group.last_seen)
    is_regression = bool(
        Group.objects.filter(
            id=group.id,
            # ensure we cant update things if the status has been set to
            # ignored
            status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
        ).exclude(
            # add to the regression window to account for races here
            active_at__gte=date - timedelta(seconds=5),
        ).update(
            active_at=date,
            # explicitly set last_seen here as ``is_resolved()`` looks
            # at the value
            last_seen=date,
            status=GroupStatus.UNRESOLVED
        )
    )

    # Mirror the DB update on the in-memory instance so later code in this
    # request sees the new state without a refetch.
    group.active_at = date
    group.status = GroupStatus.UNRESOLVED

    if is_regression and release:
        # resolutions are only valid if the state of the group is still
        # resolved -- if it were to change the resolution should get removed
        try:
            resolution = GroupResolution.objects.get(
                group=group,
            )
        except GroupResolution.DoesNotExist:
            affected = False
        else:
            cursor = connection.cursor()
            # delete() API does not return affected rows
            cursor.execute("DELETE FROM sentry_groupresolution WHERE id = %s", [resolution.id])
            affected = cursor.rowcount > 0

        if affected:
            # if we had to remove the GroupResolution (i.e. we beat the
            # the queue to handling this) then we need to also record
            # the corresponding event
            try:
                activity = Activity.objects.filter(
                    group=group,
                    type=Activity.SET_RESOLVED_IN_RELEASE,
                    ident=resolution.id,
                ).order_by('-datetime')[0]
            except IndexError:
                # XXX: handle missing data, as its not overly important
                pass
            else:
                activity.update(data={
                    'version': release.version,
                })

    if is_regression:
        activity = Activity.objects.create(
            project=group.project,
            group=group,
            type=Activity.SET_REGRESSION,
            data={
                'version': release.version if release else '',
            }
        )
        activity.send_notification()

        kick_off_status_syncs.apply_async(kwargs={
            'project_id': group.project_id,
            'group_id': group.id,
        })

    return is_regression
def _process_existing_aggregate(self, group, event, data, release):
    """Fold ``event`` into an existing ``group``.

    Builds the set of fields to refresh (last_seen, score, data, and any
    of message/level/culprit that changed), runs regression detection,
    then increments ``times_seen`` through the write buffer.

    Returns the boolean result of ``_handle_regression``.
    """
    date = max(event.datetime, group.last_seen)
    extra = {
        'last_seen': date,
        'score': ScoreClause(group),
        'data': data['data'],
    }
    # Only overwrite fields that actually changed to keep the update small.
    if event.message and event.message != group.message:
        extra['message'] = event.message
    if group.level != data['level']:
        extra['level'] = data['level']
    if group.culprit != data['culprit']:
        extra['culprit'] = data['culprit']

    is_regression = self._handle_regression(group, event, release)

    # Keep the in-memory instance in sync with what the buffer will write.
    group.last_seen = extra['last_seen']

    update_kwargs = {
        'times_seen': 1,
    }

    # buffer.incr batches the counter bump and applies ``extra`` with it.
    buffer.incr(Group, update_kwargs, {
        'id': group.id,
    }, extra)

    return is_regression
| 36.478469
| 123
| 0.576937
|
4a06523a82ef2662393a9f852e4a8b4eed0f4009
| 543
|
py
|
Python
|
api/migrations/0002_auto_20210130_0636.py
|
EstherWaweru/JuniorDeveloperTest
|
351c3f31a26dde86f650e674b8e7d79bde985e2d
|
[
"MIT"
] | 1
|
2021-01-31T07:11:25.000Z
|
2021-01-31T07:11:25.000Z
|
api/migrations/0002_auto_20210130_0636.py
|
EstherWaweru/JuniorDeveloperTest
|
351c3f31a26dde86f650e674b8e7d79bde985e2d
|
[
"MIT"
] | null | null | null |
api/migrations/0002_auto_20210130_0636.py
|
EstherWaweru/JuniorDeveloperTest
|
351c3f31a26dde86f650e674b8e7d79bde985e2d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2021-01-30 06:36
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` (Django 3.1); alters the
    # phone_number field on api.CustomUser to add a digits-only regex
    # validator.
    #
    # NOTE(review): the regex permits 10-15 digits but max_length is 12,
    # so 13-15 digit numbers pass validation yet fail the length check —
    # confirm which limit is intended (in a *new* migration; do not edit
    # this one, it has already been applied).

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='phone_number',
            field=models.CharField(max_length=12, validators=[django.core.validators.RegexValidator(message="Entered mobile number isn't in a right format!", regex='^[0-9]{10,15}$')]),
        ),
    ]
| 27.15
| 184
| 0.644567
|
4a06524a4aac72b6fb438617aa59056f279c30f5
| 1,076
|
py
|
Python
|
blog/models.py
|
DarkoR12/dafi-system
|
f923ea4273b04f7acc7016b2f7d03e51eb00b85b
|
[
"MIT"
] | null | null | null |
blog/models.py
|
DarkoR12/dafi-system
|
f923ea4273b04f7acc7016b2f7d03e51eb00b85b
|
[
"MIT"
] | null | null | null |
blog/models.py
|
DarkoR12/dafi-system
|
f923ea4273b04f7acc7016b2f7d03e51eb00b85b
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
from django.utils.text import Truncator
from meta.models import ModelMeta
class Post(ModelMeta, models.Model):
    '''
    Blog Post.

    Combines django-meta's ModelMeta (for social/SEO metadata via
    ``_metadata``) with a standard Django model.
    '''

    # Field order is preserved as declared (it affects form/admin ordering).
    title = models.CharField('título', max_length=200)
    slug = models.SlugField('slug', max_length=200)
    content = models.TextField('contenido', max_length=5000)
    pub_date = models.DateTimeField('fecha de publicación')

    # PROTECT: a user with posts cannot be deleted.
    author = models.ForeignKey(
        get_user_model(), models.PROTECT, verbose_name='autor'
    )

    # Optional; get_image() falls back to the site favicon when absent.
    image = models.ImageField(
        upload_to='blog/', verbose_name='imagen', blank=True
    )

    # django-meta mapping: metadata attribute -> model attribute/method.
    _metadata = {
        'title': 'title',
        'description': 'get_abstract',
        'image': 'get_image'
    }

    def __str__(self):
        return self.title

    def get_abstract(self):
        # First 200 characters of the content, used as the meta description.
        return Truncator(self.content).chars(200)

    def get_image(self):
        # URL for social cards; static fallback when no image was uploaded.
        return self.image.url if self.image else static('images/favicon.png')
| 24.454545
| 77
| 0.677509
|
4a0652be69c2180941e765ce70390f702c566832
| 1,982
|
py
|
Python
|
main.py
|
k783s4/Crash_Strategy_Tester
|
1133a745ec583354c5d64c25ea55a0ba42c403b5
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
k783s4/Crash_Strategy_Tester
|
1133a745ec583354c5d64c25ea55a0ba42c403b5
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
k783s4/Crash_Strategy_Tester
|
1133a745ec583354c5d64c25ea55a0ba42c403b5
|
[
"Apache-2.0"
] | 1
|
2022-03-24T23:46:46.000Z
|
2022-03-24T23:46:46.000Z
|
def bot(balance, prev_outcome, prev_out_arr, remainder):
    """Martingale-style betting strategy.

    Returns ``[multiplier, bet_size, set_aside]`` where ``set_aside``
    tells the caller to put money aside once the balance reaches 5000.
    ``prev_out_arr`` is the previous return value (index 1 = last stake).
    """
    # Flag the caller to set money aside at a balance of 5000 or more.
    set_aside = balance >= 5000

    if prev_outcome:
        # After a win: bet a fixed fraction of the balance.
        return [2.0, balance / 200000, set_aside]

    # After a loss: double the previous stake if the remainder covers it.
    doubled = prev_out_arr[1] * 2
    if doubled <= remainder:
        return [2.0, doubled, set_aside]

    # Otherwise bet the remainder, but never less than the floor fraction.
    floor_bet = balance / 100000
    if remainder < floor_bet:
        return [2.0, floor_bet, set_aside]
    return [2.0, remainder, set_aside]
def two(balance, prev_outcome, prev_out_arr, remainder):
    """Strategy at 1.3x target: bet half the remainder after a win,
    otherwise the 0.04 minimum stake."""
    half = remainder * 0.5
    # Enforce the 0.04 minimum stake.
    if half <= 0.04:
        return [1.3, 0.04]
    return [1.3, half] if prev_outcome else [1.3, 0.04]
def three(balance, prev_outcome, prev_out_arr, losstreak):
    """Flat-stake martingale at 2.0x target.

    ``losstreak`` is accepted for interface compatibility but unused.
    """
    if prev_outcome:
        # Reset to the flat minimum stake after a win.
        return [2.0, 0.01]
    # Double down after a loss, capped at the full balance.
    doubled = prev_out_arr[1] * 2
    return [2.0, doubled] if doubled <= balance else [2.0, balance]
def four(balance, prev_outcome, prev_out_arr):
    """Tiered-stake martingale: after a win the stake steps up 0.01 per
    1000 of balance above 2000 (capped at 0.06); after a loss the
    previous stake is doubled, capped at the full balance."""
    if prev_outcome:
        # Walk the tiers; the last threshold the balance exceeds wins.
        bet = 0.01
        tiers = ((2000, 0.02), (3000, 0.03), (4000, 0.04),
                 (5000, 0.05), (6000, 0.06))
        for threshold, size in tiers:
            if balance > threshold:
                bet = size
        return [2.0, bet]

    doubled = prev_out_arr[1] * 2
    return [2.0, doubled] if doubled <= balance else [2.0, balance]
def five(balance, prev_outcome, topvalue, prev_out_arr):
    """Fractional-stake martingale.

    ``topvalue`` is accepted for interface compatibility but unused.
    """
    if prev_outcome:
        # After a win: bet a fixed fraction of the balance.
        return [2.0, balance / 100000]
    # After a loss: double the previous stake, capped at the full balance.
    doubled = prev_out_arr[1] * 2
    return [2.0, doubled] if doubled <= balance else [2.0, balance]
| 27.915493
| 56
| 0.531282
|
4a0652fa37fb240ac38227f0544715072c48cd9c
| 2,316
|
py
|
Python
|
SQL_python/DataBasePython/createTab.py
|
cartellefo/projet
|
23c67e847b415fb47f71e830b89a227fffed109b
|
[
"MIT"
] | null | null | null |
SQL_python/DataBasePython/createTab.py
|
cartellefo/projet
|
23c67e847b415fb47f71e830b89a227fffed109b
|
[
"MIT"
] | null | null | null |
SQL_python/DataBasePython/createTab.py
|
cartellefo/projet
|
23c67e847b415fb47f71e830b89a227fffed109b
|
[
"MIT"
] | null | null | null |
import psycopg2
#from config import config
def create_tables():
    """Create the application tables in the local ``postgres`` database.

    Executes each CREATE TABLE statement in order (parents before the
    join table so foreign keys resolve) and commits once at the end.
    Database errors are printed and the connection is always closed.
    """
    commands = (
        """
        CREATE TABLE vendeur (
            vendeur_id SERIAL PRIMARY KEY,
            vendeur_name VARCHAR(255) NOT NULL
        )
        """,
        """
        CREATE TABLE parts (
            part_id SERIAL PRIMARY KEY,
            part_name VARCHAR(255) NOT NULL
        )
        """,
        """
        CREATE TABLE part_connecteur (
            part_id INTEGER PRIMARY KEY,
            file_extension VARCHAR(5) NOT NULL,
            drawing_data BYTEA NOT NULL,
            FOREIGN KEY (part_id)
                REFERENCES parts (part_id)
                ON UPDATE CASCADE ON DELETE CASCADE
        )
        """,
        # Fixed: the original statement had a dangling
        # "ON UPDATE CASCADE ON DELETE CASCADE" with no FOREIGN KEY
        # (a syntax error), the column typo "taile" (the commented
        # inserts below use "taille"), and nom VARCHAR(5) which is too
        # short for sample names such as 'Dupont'.
        """
        CREATE TABLE membres (
            age INTEGER,
            nom VARCHAR(255) NOT NULL,
            taille numeric NOT NULL
        )
        """,
        # Fixed: the join table referenced a non-existent "vendors"
        # table; it now references vendeur (vendeur_id) created above.
        """
        CREATE TABLE vendeur_parts (
            vendor_id INTEGER NOT NULL,
            part_id INTEGER NOT NULL,
            PRIMARY KEY (vendor_id , part_id),
            FOREIGN KEY (vendor_id)
                REFERENCES vendeur (vendeur_id)
                ON UPDATE CASCADE ON DELETE CASCADE,
            FOREIGN KEY (part_id)
                REFERENCES parts (part_id)
                ON UPDATE CASCADE ON DELETE CASCADE
        )
        """,
    )

    # def insert_tables():
    #     commands = (
    #         cur.execute("INSERT INTO membres(age,nom,taille) VALUES(21,'Dupont',1.83)")
    #         cur.execute("INSERT INTO membres(age,nom,taille) VALUES(15,'Blumâr',1.57)")
    #         cur.execute("INSERT Into membres(age,nom,taille) VALUES(18,'Özémir',1.69)")
    #     )

    conn = None
    try:
        # NOTE(review): credentials are hard-coded; move them to a config
        # file or environment variables before deploying.
        conn = psycopg2.connect(
            "dbname='postgres' user='postgres' host='localhost' password='dbpass'"
        )
        cur = conn.cursor()
        for command in commands:
            cur.execute(command)
        cur.close()
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()


if __name__ == '__main__':
    create_tables()
| 24.378947
| 103
| 0.501295
|
4a0653f4683dc94885d53489010129d4d3422fdb
| 1,926
|
py
|
Python
|
deeplearning/ml4pl/graphs/labelled/dataflow/annotators_benchmark_test.py
|
Zacharias030/ProGraML
|
cd99d2c5362acd0b24ee224492bb3e8c4d4736fb
|
[
"Apache-2.0"
] | null | null | null |
deeplearning/ml4pl/graphs/labelled/dataflow/annotators_benchmark_test.py
|
Zacharias030/ProGraML
|
cd99d2c5362acd0b24ee224492bb3e8c4d4736fb
|
[
"Apache-2.0"
] | 2
|
2020-07-27T08:22:06.000Z
|
2020-07-30T17:34:35.000Z
|
deeplearning/ml4pl/graphs/labelled/dataflow/annotators_benchmark_test.py
|
Zacharias030/ProGraML
|
cd99d2c5362acd0b24ee224492bb3e8c4d4736fb
|
[
"Apache-2.0"
] | 1
|
2020-06-05T04:58:13.000Z
|
2020-06-05T04:58:13.000Z
|
# Copyright 2019 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarks for comparing annotator performance."""
from deeplearning.ml4pl.graphs.labelled.dataflow import annotate
from deeplearning.ml4pl.graphs.labelled.dataflow import data_flow_graphs
from deeplearning.ml4pl.testing import random_programl_generator
from labm8.py import prof
from labm8.py import test
FLAGS = test.FLAGS

# This file is pure benchmarks; there is no module to collect coverage for.
MODULE_UNDER_TEST = None

# Extra pytest args: warm each benchmark up before timing it.
PYTEST_ARGS = ["--benchmark-warmup-iterations=2"]

# Real programs to test over.
PROTOS = list(random_programl_generator.EnumerateTestSet(n=20))

# The annotators to test: one entry per available analysis name.
ANNOTATORS = {
    analysis: annotate.ANALYSES[analysis]
    for analysis in annotate.AVAILABLE_ANALYSES
}
def AnnotatorBenchmark(annotator_class):
  """Micro-benchmark: annotate every test graph 5x with the given class."""
  with prof.Profile(
    f"Completed benchmark of {len(PROTOS) * 5} annotations "
    f"using {annotator_class.__name__}"
  ):
    for proto in PROTOS:
      annotator_class(proto).MakeAnnotated(5)
# Parametrized over every available analysis; names= makes pytest report
# each analysis by name.
@test.Parametrize(
  "annotator", list(ANNOTATORS.values()), names=list(ANNOTATORS.keys())
)
def test_benchmark_nx_annotator(
  benchmark, annotator: data_flow_graphs.NetworkXDataFlowGraphAnnotator,
):
  """Benchmark analysis."""
  # `benchmark` is the pytest-benchmark fixture; it times the call.
  benchmark(AnnotatorBenchmark, annotator)


if __name__ == "__main__":
  test.Main()
| 31.064516
| 74
| 0.767913
|
4a06559775871ac1febbe376c98562695c199955
| 2,774
|
py
|
Python
|
message_handler/rabbit_message_queue.py
|
jluech/PGAcloud_Fitness_Agent
|
bde5f0113af99ad1ed324c8b0ad9c5bc85e57245
|
[
"MIT"
] | null | null | null |
message_handler/rabbit_message_queue.py
|
jluech/PGAcloud_Fitness_Agent
|
bde5f0113af99ad1ed324c8b0ad9c5bc85e57245
|
[
"MIT"
] | null | null | null |
message_handler/rabbit_message_queue.py
|
jluech/PGAcloud_Fitness_Agent
|
bde5f0113af99ad1ed324c8b0ad9c5bc85e57245
|
[
"MIT"
] | null | null | null |
import json
import logging
import pika
from agent.operator import call_operator
from message_handler.message_handler import MessageHandler
from population.individual import Individual, IndividualEncoder
from utilities import utils
def receive_operator_callback(channel, method, properties, body):
    """pika consume callback: deserialize an Individual from ``body``,
    run the agent operator on it, and forward the result downstream."""
    source_queue = utils.get_messaging_source()
    payload = json.loads(body)
    individual = Individual(payload["solution"], payload["fitness"])
    logging.info("rMQ:{queue_}: Received agent operator request for individual: {ind_}".format(
        queue_=source_queue,
        ind_=individual,
    ))
    send_message_to_queue(
        channel=channel,
        payload=call_operator(individual),
    )
def send_message_to_queue(channel, payload):
    """Publish ``payload`` (JSON-encoded via IndividualEncoder) to the
    next queue in the messaging model."""
    # Route the message to the next queue in the model.
    destination = utils.get_messaging_target()
    channel.queue_declare(queue=destination, auto_delete=True, durable=True)

    logging.info("rMQ: Sending '{body_}' to {dest_}.".format(
        body_=payload,
        dest_=destination,
    ))

    # Delivery mode 2 makes the broker save the message to disk, so it is
    # restored on reboot even if RabbitMQ crashes before forwarding it.
    persistent = pika.BasicProperties(delivery_mode=2)
    channel.basic_publish(
        exchange="",
        routing_key=destination,
        body=json.dumps(payload, cls=IndividualEncoder),
        properties=persistent,
    )
class RabbitMessageQueue(MessageHandler):
    """MessageHandler backed by a per-PGA RabbitMQ broker.

    Connects to the broker host ``rabbitMQ--<pga_id>``; consumes from the
    queue named by utils.get_messaging_source() and publishes via
    send_message_to_queue().
    """

    def __init__(self, pga_id):
        # Establish connection to rabbitMQ.
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(
            host="rabbitMQ--{id_}".format(id_=pga_id),
            socket_timeout=30,
        ))

    def receive_messages(self):
        # Define communication channel.
        channel = self.connection.channel()

        # Create queue for fitness evaluation.
        queue_name = utils.get_messaging_source()
        channel.queue_declare(queue=queue_name, auto_delete=True, durable=True)

        # Actively listen for messages in queue and perform callback on
        # receive. NOTE: start_consuming() blocks this thread indefinitely.
        channel.basic_consume(
            queue=queue_name,
            on_message_callback=receive_operator_callback,
        )
        logging.info("rMQ:{queue_}: Waiting for fitness evaluation requests.".format(
            queue_=queue_name
        ))
        channel.start_consuming()

    def send_message(self, individual):
        # Define communication channel.
        channel = self.connection.channel()
        send_message_to_queue(
            channel=channel,
            payload=individual
        )
| 32.255814
| 95
| 0.684932
|
4a06576e638367438062ceea7431db0bc405a08e
| 123
|
py
|
Python
|
demos/19.01.30/10am/problem_2.py
|
joeparis/CS161-Demos-Winter-19
|
613a93de9169fa4ee62538ff0ac93a9930b75fef
|
[
"MIT"
] | null | null | null |
demos/19.01.30/10am/problem_2.py
|
joeparis/CS161-Demos-Winter-19
|
613a93de9169fa4ee62538ff0ac93a9930b75fef
|
[
"MIT"
] | null | null | null |
demos/19.01.30/10am/problem_2.py
|
joeparis/CS161-Demos-Winter-19
|
613a93de9169fa4ee62538ff0ac93a9930b75fef
|
[
"MIT"
] | null | null | null |
# Echo each character of the user's input with its Unicode code point,
# right-aligned in a 3-wide field.
text = input("Please enter a string: ")

for ch in text:
    print(f"{ch} = {ord(ch):3}")
| 24.6
| 46
| 0.691057
|
4a065772d7894bfded9c904d6be047159397fa97
| 9,970
|
py
|
Python
|
pyamg/classical/cr.py
|
thomasjpfan/pyamg
|
b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a
|
[
"MIT"
] | null | null | null |
pyamg/classical/cr.py
|
thomasjpfan/pyamg
|
b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a
|
[
"MIT"
] | 1
|
2019-12-06T17:06:29.000Z
|
2019-12-06T17:06:29.000Z
|
pyamg/classical/cr.py
|
lexeyV/pyamg
|
cabbb008fa26d4c9d8c24decf06374185864c88b
|
[
"MIT"
] | null | null | null |
"""Compatible Relaxation."""
from __future__ import print_function
import numpy as np
import scipy as sp
from scipy.linalg import norm
from scipy.sparse import isspmatrix, spdiags, isspmatrix_csr
from copy import deepcopy
from ..relaxation.relaxation import gauss_seidel, gauss_seidel_indexed
from pyamg import amg_core
__all__ = ['CR', 'binormalize']
def _CRsweep(A, B, Findex, Cindex, nu, thetacr, method):
    """Perform CR sweeps on a target vector.

    Internal function called by CR. Performs habituated or concurrent
    relaxation sweeps on target vector. Stops when either (i) very fast
    convergence, CF < 0.1*thetacr, are observed, or at least a given number
    of sweeps have been performed and the relative change in CF < 0.1.

    Parameters
    ----------
    A : csr_matrix
    B : array like
        Target near null space mode
    Findex : array like
        List of F indices in current splitting
    Cindex : array like
        List of C indices in current splitting
    nu : int
        minimum number of relaxation sweeps to do
    thetacr
        Desired convergence factor

    Returns
    -------
    rho : float
        Convergence factor of last iteration
    e : array like
        Smoothed error vector
    """
    n = A.shape[0]   # problem size
    # NOTE(review): numax is assigned but never used.
    numax = nu
    z = np.zeros((n,))
    # Start from the first target vector with C-point entries zeroed.
    e = deepcopy(B[:, 0])
    e[Cindex] = 0.0
    enorm = norm(e)
    rhok = 1
    it = 0

    while True:
        if method == 'habituated':
            # Relax on all points, then re-zero the C points.
            gauss_seidel(A, e, z, iterations=1)
            e[Cindex] = 0.0
        elif method == 'concurrent':
            # Relax only on the F points; C points stay zero.
            gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
        else:
            raise NotImplementedError('method not recognized: need habituated '
                                      'or concurrent')

        # Convergence factor of this sweep = ratio of successive norms.
        enorm_old = enorm
        enorm = norm(e)
        rhok_old = rhok
        rhok = enorm / enorm_old
        it += 1

        # criteria 1 -- fast convergence
        if rhok < 0.1 * thetacr:
            break
        # criteria 2 -- at least nu iters, small relative change in CF (<0.1)
        elif ((abs(rhok - rhok_old) / rhok) < 0.1) and (it >= nu):
            break

    return rhok, e
def CR(A, method='habituated', B=None, nu=3, thetacr=0.7,
       thetacs='auto', maxiter=20, verbose=False):
    """Use Compatible Relaxation to compute a C/F splitting.

    Parameters
    ----------
    A : csr_matrix
        sparse matrix (n x n) usually matrix A of Ax=b
    method : {'habituated','concurrent'}
        Method used during relaxation:
            - concurrent: GS relaxation on F-points, leaving e_c = 0
            - habituated: full relaxation, setting e_c = 0
    B : array like
        Target algebraically smooth vector used in CR. If multiple
        vectors passed in, only first one is used. If B=None, the
        constant vector is used.
    nu : int
        Number of smoothing iterations to apply each CR sweep.
    thetacr : float
        Desired convergence factor of relaxations, 0 < thetacr < 1.
    thetacs : list, float, 'auto'
        Threshold value, 0 < thetacs < 1, to consider nodes from
        candidate set for coarse grid. If e[i] > thetacs for relaxed
        error vector, e, node i is considered for the coarse grid.
        Can be passed in as float to be used for every iteration,
        list of floats to be used on progressive iterations, or as
        string 'auto,' wherein each iteration thetacs = 1 - rho, for
        convergence factor rho from most recent smoothing.
    maxiter : int
        Maximum number of CR iterations (updating of C/F splitting)
        to do.
    verbose : bool
        If true, print iteration number, convergence factor and
        coarsening factor after each iteration.

    Returns
    -------
    splitting : array
        C/F list of 1's (coarse pt) and 0's (fine pt) (n x 1)

    References
    ----------
    [1] Brannick, James J., and Robert D. Falgout. "Compatible
    relaxation and coarsening in algebraic multigrid." SIAM Journal
    on Scientific Computing 32.3 (2010): 1393-1416.

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical.cr import CR
    >>> A = poisson((20,20),format='csr')
    >>> splitting = CR(A)

    """
    n = A.shape[0]  # problem size

    # Normalize thetacs to a reversed list so thetacs.pop() yields the
    # threshold for the next iteration ('auto' is handled per iteration).
    if thetacs == 'auto':
        pass
    else:
        if isinstance(thetacs, list):
            thetacs.reverse()
        elif isinstance(thetacs, float):
            # BUG FIX: the original called list(thetacs), which raises
            # TypeError because a float is not iterable. Wrap the scalar
            # in a one-element list instead.
            thetacs = [thetacs]
        if (np.max(thetacs) >= 1) or (np.min(thetacs) <= 0):
            raise ValueError("Must have 0 < thetacs < 1")

    if (thetacr >= 1) or (thetacr <= 0):
        raise ValueError("Must have 0 < thetacr < 1")

    if not isspmatrix_csr(A):
        raise TypeError('expecting csr sparse matrix A')

    if A.dtype == complex:
        raise NotImplementedError('complex A not implemented')

    # Set initial vector. If none provided, set default
    # initial vector of ones
    if B is None:
        B = np.ones((n, 1))
    elif (B.ndim == 1):
        B = B.reshape((len(B), 1))

    target = B[:, 0]

    # 3.1a - Initialize all nodes as F points
    splitting = np.zeros((n,), dtype='intc')
    # indices[0] holds the number of F points; indices[1:] the node order.
    indices = np.zeros((n+1,), dtype='intc')
    indices[0] = n
    indices[1:] = np.arange(0, n, dtype='intc')
    Findex = indices[1:]
    Cindex = np.empty((0,), dtype='intc')
    gamma = np.zeros((n,))

    # 3.1b - Run initial smoothing sweep
    rho, e = _CRsweep(A, B, Findex, Cindex, nu, thetacr, method=method)

    # 3.1c - Loop until desired convergence or maximum iterations reached
    for it in range(0, maxiter):

        # Set thetacs value for this iteration
        if thetacs == 'auto':
            tcs = 1-rho
        else:
            tcs = thetacs[-1]
            if len(thetacs) > 1:
                thetacs.pop()

        # 3.1d - 3.1f, see amg_core.ruge_stuben
        fn = amg_core.cr_helper
        fn(A.indptr,
           A.indices,
           target,
           e,
           indices,
           splitting,
           gamma,
           tcs)

        # Separate F indices and C indices
        num_F = indices[0]
        Findex = indices[1:(num_F+1)]
        Cindex = indices[(num_F+1):]

        # 3.1g - Call CR smoothing iteration
        rho, e = _CRsweep(A, B, Findex, Cindex, nu, thetacr, method=method)

        # Print details on current iteration
        if verbose:
            print("CR Iteration ", it, ", CF = ", rho,
                  ", Coarsening factor = ", float(n-indices[0])/n)

        # If convergence factor satisfactory, break loop
        if rho < thetacr:
            break

    return splitting
def binormalize(A, tol=1e-5, maxiter=10):
    """Binormalize matrix A.  Attempt to create unit l_1 norm rows.

    Parameters
    ----------
    A : csr_matrix
        sparse matrix (n x n)
    tol : float
        tolerance
    x : array
        guess at the diagonal
    maxiter : int
        maximum number of iterations to try

    Returns
    -------
    C : csr_matrix
        diagonally scaled A, C=DAD

    Notes
    -----
        - Goal: Scale A so that l_1 norm of the rows are equal to 1:
        - B = DAD
        - want row sum of B = 1
        - easily done with tol=0 if B=DA, but this is not symmetric
        - algorithm is O(N log (1.0/tol))

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical import binormalize
    >>> A = poisson((10,),format='csr')
    >>> C = binormalize(A)

    References
    ----------
    .. [1] Livne, Golub, "Scaling by Binormalization"
       Tech Report SCCM-03-12, SCCM, Stanford, 2003
       http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.1679

    """
    if not isspmatrix(A):
        raise TypeError('expecting sparse matrix A')

    if A.dtype == complex:
        raise NotImplementedError('complex A not implemented')

    n = A.shape[0]
    it = 0
    # x approximates the squared diagonal scaling (d = sqrt(x) at the end).
    x = np.ones((n, 1)).ravel()

    # 1.  B holds the elementwise square of A.
    B = A.multiply(A).tocsc()  # power(A,2) inconsistent in numpy, scipy.sparse
    d = B.diagonal().ravel()

    # 2.  beta = row sums of B under the current scaling.
    beta = B * x
    betabar = (1.0/n) * np.dot(x, beta)
    stdev = rowsum_stdev(x, beta)

    # 3  Gauss-Seidel-like sweep: update each x[i] in turn, maintaining
    #    beta and betabar incrementally instead of recomputing B*x.
    while stdev > tol and it < maxiter:
        for i in range(0, n):
            # solve equation x_i, keeping x_j's fixed
            # see equation (12)
            c2 = (n-1)*d[i]
            c1 = (n-2)*(beta[i] - d[i]*x[i])
            c0 = -d[i]*x[i]*x[i] + 2*beta[i]*x[i] - n*betabar
            if (-c0 < 1e-14):
                print('warning: A nearly un-binormalizable...')
                return A
            else:
                # see equation (12)
                xnew = (2*c0)/(-c1 - np.sqrt(c1*c1 - 4*c0*c2))
                dx = xnew - x[i]

                # here we assume input matrix is symmetric since we grab a row of B
                # instead of a column
                ii = B.indptr[i]
                iii = B.indptr[i+1]
                dot_Bcol = np.dot(x[B.indices[ii:iii]], B.data[ii:iii])

                betabar = betabar + (1.0/n)*dx*(dot_Bcol + beta[i] + d[i]*dx)
                beta[B.indices[ii:iii]] += dx*B.data[ii:iii]

                x[i] = xnew

        stdev = rowsum_stdev(x, beta)
        it += 1

    # rescale for unit 2-norm
    d = np.sqrt(x)
    D = spdiags(d.ravel(), [0], n, n)
    C = D * A * D
    C = C.tocsr()
    beta = C.multiply(C).sum(axis=1)
    scale = np.sqrt((1.0/n) * np.sum(beta))
    return (1/scale)*C
def rowsum_stdev(x, beta):
    r"""Compute the relative standard deviation of the row sums.

    For approximation x, returns the std dev of the row sums

        s(x) = ( 1/n \sum_k (x_k beta_k - betabar)^2 )^(1/2)

    with betabar = 1/n dot(beta,x), normalized by betabar.

    Parameters
    ----------
    x : array
    beta : array

    Returns
    -------
    s(x)/betabar : float

    Notes
    -----
    equation (7) in Livne/Golub
    """
    n = x.size
    betabar = (1.0 / n) * np.dot(x, beta)
    deviations = x * beta - betabar
    stdev = np.sqrt((1.0 / n) * np.sum(deviations ** 2))
    return stdev / betabar
| 28.815029
| 79
| 0.564092
|
4a0657ad65f573d02d23627efee488f6f534414c
| 1,953
|
py
|
Python
|
ngram_counts.py
|
spyysalo/recover-nonascii-characters
|
2aea1f20f90e152e880f8a495b596966bea241fc
|
[
"MIT"
] | null | null | null |
ngram_counts.py
|
spyysalo/recover-nonascii-characters
|
2aea1f20f90e152e880f8a495b596966bea241fc
|
[
"MIT"
] | null | null | null |
ngram_counts.py
|
spyysalo/recover-nonascii-characters
|
2aea1f20f90e152e880f8a495b596966bea241fc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import re
import gzip
from collections import Counter, defaultdict
from argparse import ArgumentParser
DESCRIPTION = 'Get counts for N-grams with non-ASCII alphabetic characters'

# Raw strings: '\W', '\d' and '\s' are not valid *string* escapes, so the
# original non-raw literals triggered DeprecationWarning (SyntaxWarning on
# Python 3.12+). The compiled patterns are unchanged.
# Tokens: runs of alphabetic chars, runs of digits, runs of whitespace, or
# any single other character.
TOKENIZATION_RE = re.compile(r'([^\W\d_]+|\d+|\s+|.)')
# Matches one alphabetic character outside a-z/A-Z.
NON_ASCII_ALPHA_RE = re.compile(r'[^\W\d_a-zA-Z]')
def argparser():
    """Build the command-line parser for this script."""
    parser = ArgumentParser(description=DESCRIPTION)
    parser.add_argument('-l', '--lowercase', default=False, action='store_true')
    parser.add_argument('-n', default=2, type=int)
    parser.add_argument('text', nargs='+')
    return parser
def tokenize(sentence):
    """Split *sentence* into tokens, wrapped in <s>/</s> sentence markers.

    Whitespace tokens and empty split artifacts are dropped.
    """
    pieces = TOKENIZATION_RE.split(sentence)
    body = [piece for piece in pieces if piece and not piece.isspace()]
    return ['<s>'] + body + ['</s>']
def ngrams(tokens, n):
    """Return every contiguous window of *n* tokens as a list of lists."""
    window_count = len(tokens) - n + 1
    return [tokens[start:start + n] for start in range(window_count)]
def contains_non_ascii_alpha(string):
    # Truthy (a Match object) iff *string* contains an alphabetic character
    # outside a-z/A-Z; None otherwise.
    return NON_ASCII_ALPHA_RE.search(string)
def count_ngrams(stream, counts, options):
    """Accumulate counts of 1..options.n grams from *stream* into *counts*.

    Only N-grams whose final token contains a non-ASCII alphabetic
    character are counted. *counts* maps order -> Counter of joined grams
    and is both mutated in place and returned.
    """
    for line in stream:
        if options.lowercase:
            line = line.lower()
        # Tokenize once per line: the original re-tokenized the line for
        # every order n; the unused enumerate counter is dropped too.
        tokens = tokenize(line)
        for order in range(1, options.n + 1):
            for gram in ngrams(tokens, order):
                if contains_non_ascii_alpha(gram[-1]):
                    counts[order][' '.join(gram)] += 1
    return counts
def main(argv):
    """Entry point: count N-grams in each input file and print the counts
    in an ARPA-like format (\\data\\ header, per-order sections)."""
    options = argparser().parse_args(argv[1:])

    counts = defaultdict(Counter)
    for filename in options.text:
        # Transparently handle gzip-compressed inputs.
        if filename.endswith('.gz'):
            stream = gzip.open(filename, 'rt', encoding='utf-8')
        else:
            stream = open(filename)
        with stream:
            count_ngrams(stream, counts, options)

    print('\\data\\')
    for order in range(1, options.n + 1):
        print(f'ngram {order}={sum(counts[order].values())}')
    for order in range(1, options.n + 1):
        print(f'\n\\{order}-grams:')
        # Most frequent first.
        for gram, count in sorted(counts[order].items(), key=lambda kv: -kv[1]):
            print(f'{count}\t{gram}')
    print('\n\\end\\')


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 25.363636
| 78
| 0.610343
|
4a06586fdf2fed6bc152066dcb41cf5efaeb1cca
| 1,548
|
py
|
Python
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/scripts/dataspec.py
|
Alecto3-D/testable-greeter
|
09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78
|
[
"MIT"
] | 2
|
2017-07-11T18:56:27.000Z
|
2017-07-28T14:01:12.000Z
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/scripts/dataspec.py
|
Alecto3-D/testable-greeter
|
09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78
|
[
"MIT"
] | 1
|
2017-07-28T13:53:41.000Z
|
2017-07-31T15:30:40.000Z
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/scripts/dataspec.py
|
Alecto3-D/testable-greeter
|
09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78
|
[
"MIT"
] | null | null | null |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import json
import os
import sys
from twisted.internet import defer
from buildbot.data import connector
from buildbot.test.fake import fakemaster
from buildbot.util import in_reactor
@in_reactor
@defer.inlineCallbacks
def dataspec(config):
    """Dump the Buildbot data API endpoint spec as JSON.

    config keys:
      'out'    -- output file path, or '--' to write to stdout.
      'global' -- optional JS global name; when given the JSON is emitted
                  as a ``window.<name>=`` assignment.

    Yields/returns 0 as the process exit code.
    """
    master = yield fakemaster.make_master()
    data = connector.DataConnector()
    data.setServiceParent(master)
    if config['out'] != '--':
        # Create the target directory on demand.
        dirs = os.path.dirname(config['out'])
        if dirs and not os.path.exists(dirs):
            os.makedirs(dirs)
        f = open(config['out'], "w")
    else:
        f = sys.stdout

    if config['global'] is not None:
        f.write("window." + config['global'] + '=')
    f.write(json.dumps(data.allEndpoints(), indent=2))
    # BUG FIX: only close handles we opened; the original unconditionally
    # called f.close(), which closed sys.stdout when writing to '--'.
    if f is not sys.stdout:
        f.close()

    defer.returnValue(0)
| 32.25
| 79
| 0.721576
|
4a0658d084744a4b8901dc1da919021faf447cdb
| 1,391
|
py
|
Python
|
packages/python/plotly/plotly/validators/scattergeo/selected/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 3
|
2020-02-04T21:39:20.000Z
|
2020-11-17T19:07:07.000Z
|
packages/python/plotly/plotly/validators/scattergeo/selected/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 12
|
2020-06-06T01:22:26.000Z
|
2022-03-12T00:13:42.000Z
|
packages/python/plotly/plotly/validators/scattergeo/selected/__init__.py
|
sgn/plotly.py
|
587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6
|
[
"MIT"
] | 17
|
2019-11-21T14:11:29.000Z
|
2019-11-21T15:26:23.000Z
|
import _plotly_utils.basevalidators
# Auto-generated plotly validator for the scattergeo.selected.textfont
# compound property; do not hand-edit beyond comments.
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(
        self, plotly_name="textfont", parent_name="scattergeo.selected", **kwargs
    ):
        super(TextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Textfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the text font color of selected points.
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
# Auto-generated plotly validator for the scattergeo.selected.marker
# compound property; do not hand-edit beyond comments.
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(
        self, plotly_name="marker", parent_name="scattergeo.selected", **kwargs
    ):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the marker color of selected points.
            opacity
                Sets the marker opacity of selected points.
            size
                Sets the marker size of selected points.
""",
            ),
            **kwargs
        )
| 29.595745
| 81
| 0.575126
|
4a0658e1c3801120cccbc778f5ef5a84e835e094
| 7,123
|
py
|
Python
|
lib/django-0.96/django/db/transaction.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-0.96/django/db/transaction.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-0.96/django/db/transaction.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
"""
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
try:
import thread
except ImportError:
import dummy_thread as thread
from django.db import connection
from django.conf import settings
class TransactionManagementError(Exception):
    """
    Raised when transaction management is misused: leaving a block that was
    never entered, or ending a managed block with pending changes.
    """
    pass
# The state is a dictionary of lists. The key to the dict is the current
# thread ident and the list is handled as a stack of "managed" flags, one
# entry per nested enter_transaction_management() call.
state = {}

# The dirty flag is set by *_unless_managed functions to denote that the
# code under transaction management has changed things to require a
# database commit. Keyed by thread ident.
dirty = {}
def enter_transaction_management():
    """
    Enters transaction management for a running thread. It must be balanced with
    the appropriate leave_transaction_management call, since the actual state is
    managed as a stack.
    The state and dirty flag are carried over from the surrounding block or
    from the settings, if there is no surrounding block (dirty is always false
    when no current block is running).
    """
    thread_ident = thread.get_ident()
    # dict.has_key() is deprecated (and removed in Python 3); the `in`
    # operator is equivalent and works on both Python 2 and 3.
    if thread_ident in state and state[thread_ident]:
        # Nested block: inherit the managed flag of the enclosing block.
        state[thread_ident].append(state[thread_ident][-1])
    else:
        # Outermost block: seed the stack from the global settings default.
        state[thread_ident] = []
        state[thread_ident].append(settings.TRANSACTIONS_MANAGED)
    if thread_ident not in dirty:
        dirty[thread_ident] = False
def leave_transaction_management():
    """
    Leaves transaction management for a running thread. A dirty flag is carried
    over to the surrounding block, as a commit will commit all changes, even
    those from outside. (Commits are on connection level.)
    """
    thread_ident = thread.get_ident()
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if thread_ident in state and state[thread_ident]:
        del state[thread_ident][-1]
    else:
        raise TransactionManagementError("This code isn't under transaction management")
    if dirty.get(thread_ident, False):
        # Ending a managed block with uncommitted work is a programming
        # error: undo everything, then complain loudly.
        rollback()
        raise TransactionManagementError("Transaction managed block ended with pending COMMIT/ROLLBACK")
    dirty[thread_ident] = False
def is_dirty():
    """
    Returns True if the current transaction requires a commit for changes to
    happen.
    """
    current_thread = thread.get_ident()
    return dirty.get(current_thread, False)
def set_dirty():
    """
    Sets a dirty flag for the current thread and code streak. This can be used
    to decide in a managed block of code to decide whether there are open
    changes waiting for commit.
    """
    thread_ident = thread.get_ident()
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if thread_ident in dirty:
        dirty[thread_ident] = True
    else:
        raise TransactionManagementError("This code isn't under transaction management")
def set_clean():
    """
    Resets a dirty flag for the current thread and code streak. This can be used
    to decide in a managed block of code to decide whether a commit or rollback
    should happen.
    """
    thread_ident = thread.get_ident()
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if thread_ident in dirty:
        dirty[thread_ident] = False
    else:
        raise TransactionManagementError("This code isn't under transaction management")
def is_managed():
    """
    Checks whether the transaction manager is in manual or in auto state.
    """
    thread_ident = thread.get_ident()
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if thread_ident in state:
        if state[thread_ident]:
            # Innermost block's flag wins.
            return state[thread_ident][-1]
    # No active block: fall back to the global settings default.
    return settings.TRANSACTIONS_MANAGED
def managed(flag=True):
    """
    Puts the transaction manager into a manual state: managed transactions have
    to be committed explicitly by the user. If you switch off transaction
    management and there is a pending commit/rollback, the data will be
    commited.
    """
    stack = state.get(thread.get_ident(), None)
    if not stack:
        raise TransactionManagementError("This code isn't under transaction management")
    # Update the managed flag for the innermost active block.
    stack[-1] = flag
    # Dropping back to auto-commit flushes any pending changes.
    if not flag and is_dirty():
        connection._commit()
        set_clean()
def commit_unless_managed():
    """
    Commits changes if the system is not in managed transaction mode.
    """
    if is_managed():
        # Under manual management we only record that work is pending.
        set_dirty()
    else:
        connection._commit()
def rollback_unless_managed():
    """
    Rolls back changes if the system is not in managed transaction mode.
    """
    if is_managed():
        # Under manual management we only record that work is pending.
        set_dirty()
    else:
        connection._rollback()
def commit():
    """
    Does the commit itself and resets the dirty flag.
    """
    # Commit at the connection level, then mark this thread as clean.
    connection._commit()
    set_clean()
def rollback():
    """
    This function does the rollback itself and resets the dirty flag.
    """
    # Roll back at the connection level, then mark this thread as clean.
    connection._rollback()
    set_clean()
##############
# DECORATORS #
##############
def autocommit(func):
    """
    Decorator that activates commit on save. This is Django's default behavior;
    this decorator is useful if you globally activated transaction management in
    your settings file and want the default behavior in some view functions.
    """
    def _autocommit(*args, **kw):
        # enter/leave must stay balanced, hence the try/finally.
        try:
            enter_transaction_management()
            managed(False)
            return func(*args, **kw)
        finally:
            leave_transaction_management()
    return _autocommit
def commit_on_success(func):
    """
    This decorator activates commit on response. This way, if the view function
    runs successfully, a commit is made; if the viewfunc produces an exception,
    a rollback is made. This is one of the most common ways to do transaction
    control in web apps.
    """
    def _commit_on_success(*args, **kw):
        try:
            enter_transaction_management()
            managed(True)
            try:
                res = func(*args, **kw)
            # `except Exception, e` is Python-2-only syntax and bound an
            # unused name; the bare form is equivalent and portable.
            except Exception:
                # View raised: undo pending changes, then re-raise as-is.
                if is_dirty():
                    rollback()
                raise
            else:
                # Success path: persist whatever the view changed.
                if is_dirty():
                    commit()
            return res
        finally:
            leave_transaction_management()
    return _commit_on_success
def commit_manually(func):
    """
    Decorator that activates manual transaction control. It just disables
    automatic transaction control and doesn't do any commit/rollback of its
    own -- it's up to the user to call the commit and rollback functions
    themselves.
    """
    def _commit_manually(*args, **kw):
        # enter/leave must stay balanced, hence the try/finally.
        try:
            enter_transaction_management()
            managed(True)
            return func(*args, **kw)
        finally:
            leave_transaction_management()
    return _commit_manually
| 31.941704
| 104
| 0.673452
|
4a06599bf361bbe24d36e0d3616b6f07b9f8303c
| 1,224
|
py
|
Python
|
src/app/celery.py
|
f213/education-backend
|
e76151ac1ee8485debf62ed3100717206ed87712
|
[
"MIT"
] | 151
|
2020-04-21T09:58:57.000Z
|
2021-09-12T09:01:21.000Z
|
src/app/celery.py
|
f213/education-backend
|
e76151ac1ee8485debf62ed3100717206ed87712
|
[
"MIT"
] | 163
|
2020-05-29T20:52:00.000Z
|
2021-09-11T12:44:56.000Z
|
src/app/celery.py
|
f213/education-backend
|
e76151ac1ee8485debf62ed3100717206ed87712
|
[
"MIT"
] | 39
|
2020-04-21T12:28:16.000Z
|
2021-09-12T15:33:47.000Z
|
import os
from celery import Celery
from celery.schedules import crontab
from django.conf import settings
from app.conf.environ import env
__all__ = [
    'celery',
]
# Django settings must be importable before the Celery app reads them.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
celery = Celery('app')
celery.conf.update(
    broker_url=env('REDISCLOUD_URL'),
    task_always_eager=env('CELERY_ALWAYS_EAGER', cast=bool, default=settings.DEBUG),  # by default in debug mode we run all celery tasks in the foreground.
    task_eager_propagates=True,
    task_ignore_result=True,
    timezone=env('TIME_ZONE', cast=str, default='Europe/Moscow'),
    enable_utc=False,
    # Periodic tasks; all fire at a fixed minute of every hour.
    beat_schedule={
        'run_started_purchase_trigger': {
            'task': 'triggers.tasks.check_for_started_purchase_triggers',
            'schedule': crontab(hour='*', minute=15),
        },
        'run_record_feedback_trigger': {
            'task': 'triggers.tasks.check_for_record_feedback_triggers',
            'schedule': crontab(hour='*', minute=15),
        },
        'ship_unshipped_orders': {
            'task': 'orders.tasks.ship_unshipped_orders',
            'schedule': crontab(hour='*', minute=0),
        },
    },
)
celery.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 29.853659
| 150
| 0.67402
|
4a0659addb5a9d1f42e87985ba34039450b9408f
| 4,262
|
py
|
Python
|
src/main.py
|
romainbsl/flask-todo
|
a40e76b685ed132bf76d3a198bef5f8a89ad3bc4
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
romainbsl/flask-todo
|
a40e76b685ed132bf76d3a198bef5f8a89ad3bc4
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
romainbsl/flask-todo
|
a40e76b685ed132bf76d3a198bef5f8a89ad3bc4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import json
import flask
import psycopg2
from flask import jsonify, Response
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from qovery_client.qovery import Qovery
# --- START INIT ---
app = flask.Flask(__name__)
# this file is not used while deployed on Qovery
configuration_file_path = '../.qovery/local_configuration.json'
# get database configuration from Qovery
qovery = Qovery(configuration_file_path=configuration_file_path)
# NOTE(review): assumes at least one database is configured; raises
# IndexError otherwise.
db_conf = qovery.databases[0]
# Setup PostgreSQL
conn = psycopg2.connect(host=db_conf.host, user=db_conf.username, database='postgres', password=db_conf.password, port=db_conf.port)
# Autocommit: each cursor.execute() is committed immediately, so the
# request handlers below never call conn.commit().
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cursor = conn.cursor()
# Idempotent schema bootstrap, executed once at import time.
queries = [
    """
    CREATE TABLE IF NOT EXISTS todo (
        id SERIAL PRIMARY KEY,
        created_at TIMESTAMP DEFAULT NOW(),
        title TEXT NOT NULL,
        description TEXT
    )
    """
]
for query in queries:
    cursor.execute(query)
# --- END INIT ---
class Todo(object):
    """A single todo row, constructible from a DB row tuple or a JSON payload."""

    def __init__(self, row_tuple=None, json_dict=None):
        # Default every field so partially-populated instances are safe to read.
        self.id = None
        self.created_at = None
        self.title = None
        self.description = None
        if row_tuple:
            # Column order matches the table: id, created_at, title, description.
            self.id, self.created_at, self.title, self.description = row_tuple[:4]
        if json_dict:
            # Only override the fields the payload actually carries.
            self.title = json_dict.get('title', self.title)
            self.description = json_dict.get('description', self.description)

    @property
    def to_json_dict(self):
        """JSON-serializable dict view; created_at is rendered as ISO-8601."""
        return {
            'id': self.id,
            'created_at': self.created_at.isoformat(),
            'title': self.title,
            'description': self.description
        }

    @property
    def error_message(self):
        """Return a validation message when the todo is invalid, else None."""
        return 'title field is mandatory' if not self.title else None
@app.route('/', methods=['GET'])
def index():
    """Landing page: shows the deployed branch name and lists the API routes."""
    branch_name = qovery.branch_name
    if not branch_name:
        branch_name = 'unknown'
    return "<h1>Welcome :)</h1><p>The current branch is <b>" + branch_name + "</b></p><p>Source code available " \
           "<a href='https://github.com/evoxmusic/flask-todo'>here</a></p>" \
           "<p>API resources available:</p>" \
           "<ul><li>GET /api/todo -> to list todo</li>" \
           "<li>GET /api/todo/:id -> to show todo by id</li>" \
           "<li>POST /api/todo -> to add todo</li>" \
           "<li>DELETE /api/todo/:id -> to delete todo by id</li></ul>"
@app.route('/api/todo', methods=['GET'])
def list_todo():
    """Return every todo as JSON: {"results": [...]}."""
    cursor.execute('SELECT * FROM todo')
    rows = cursor.fetchall()
    results = [Todo(row_tuple=row).to_json_dict for row in rows]
    return jsonify({'results': results})
@app.route('/api/todo', methods=['POST'])
def add_todo():
    """Create a todo from the JSON body; 400 on validation failure, 201 on success."""
    json_dict = flask.request.get_json()
    todo = Todo(json_dict=json_dict)
    if todo.error_message:
        return Response(json.dumps({'error_message': todo.error_message}), status=400)
    # RETURNING echoes the stored row (with generated id/created_at) back
    # to the client without issuing a second query.
    cursor.execute('INSERT INTO todo (title, description) VALUES (%s, %s) RETURNING id, created_at, title, description',
                   (todo.title, todo.description,))
    resp = cursor.fetchone()
    return Response(json.dumps(Todo(row_tuple=resp).to_json_dict), status=201)
@app.route('/api/todo/<id>', methods=['GET'])
def get_todo(id):
    """Fetch a single todo by id."""
    cursor.execute('SELECT * FROM todo WHERE id = %s LIMIT 1', (id,))
    resp = cursor.fetchone()
    if not resp:
        # NOTE(review): jsonify() with no args returns an empty JSON body
        # with status 200 -- a 404 might be more conventional here.
        return jsonify()
    return jsonify(Todo(row_tuple=resp).to_json_dict)
@app.route('/api/todo/<id>', methods=['DELETE'])
def delete_todo(id):
    """Delete a todo by id; responds 204 whether or not the row existed."""
    cursor.execute('DELETE FROM todo WHERE id = %s', (id,))
    return '', 204
if __name__ == '__main__':
    # Development server; in production this app should sit behind a WSGI server.
    print('Server is ready!')
    app.run(host='0.0.0.0', port=5000)
| 29.393103
| 143
| 0.572736
|
4a0659e5a759cffacb92ff19de26dd3c8e90b10b
| 6,627
|
py
|
Python
|
main.py
|
ricosega/ci-cd-serverless-example-cloudbuild-and-gcp
|
262f3e4b5a97281d10a3f4d545c5fa7103a1e15a
|
[
"MIT"
] | null | null | null |
main.py
|
ricosega/ci-cd-serverless-example-cloudbuild-and-gcp
|
262f3e4b5a97281d10a3f4d545c5fa7103a1e15a
|
[
"MIT"
] | null | null | null |
main.py
|
ricosega/ci-cd-serverless-example-cloudbuild-and-gcp
|
262f3e4b5a97281d10a3f4d545c5fa7103a1e15a
|
[
"MIT"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START functions_helloworld_http]
# [START functions_http_content]
from flask import escape
# [END functions_helloworld_http]
# [END functions_http_content]
# [START functions_tips_terminate]
# [START functions_helloworld_get]
def hello_get(request):
    """HTTP Cloud Function returning a fixed greeting.

    Args:
        request (flask.Request): The request object (unused).
    Returns:
        The constant greeting text; Flask turns it into a 200 response.
    """
    greeting = 'Hello ackstorm!'
    return greeting
# [END functions_helloworld_get]
# [START functions_helloworld_background]
def hello_background(data, context):
    """Background Cloud Function.

    Args:
        data (dict): The dictionary with data specific to the given event.
        context (google.cloud.functions.Context): The Cloud Functions event
            metadata (unused).
    Returns:
        A greeting using data['name'] when present, else 'World'.
    """
    name = data['name'] if data and 'name' in data else 'World'
    return 'Hello {}!'.format(name)
# [END functions_helloworld_background]
# [END functions_tips_terminate]
# [START functions_helloworld_http]
def hello_http(request):
    """HTTP Cloud Function.

    Looks for a 'name' first in the JSON body, then in the query string,
    falling back to 'World'. The name is HTML-escaped before formatting.

    Args:
        request (flask.Request): The request object.
    Returns:
        The greeting text (Flask converts it via `make_response`).
    """
    body = request.get_json(silent=True)
    args = request.args
    name = 'World'
    if body and 'name' in body:
        name = body['name']
    elif args and 'name' in args:
        name = args['name']
    return 'Hello {}!'.format(escape(name))
# [END functions_helloworld_http]
# [START functions_helloworld_pubsub]
def hello_pubsub(data, context):
    """Background Cloud Function to be triggered by Pub/Sub.

    Prints a greeting decoded from the base64 message payload, or
    'Hello World!' when the event carries no payload.
    """
    import base64
    try:
        raw = data['data']
    except KeyError:
        name = 'World'
    else:
        name = base64.b64decode(raw).decode('utf-8')
    print('Hello {}!'.format(name))
# [END functions_helloworld_pubsub]
# [START functions_helloworld_storage]
def hello_gcs(data, context):
    """Background Cloud Function to be triggered by Cloud Storage.

    Logs the object id of the file that fired the event.
    """
    object_id = data['objectId']
    print("File: {}.".format(object_id))
# [END functions_helloworld_storage]
# [START functions_http_content]
def hello_content(request):
    """ Responds to an HTTP request using data from the request body parsed
    according to the "content-type" header.
    Args:
        request (flask.Request): The request object.
        <http://flask.pocoo.org/docs/1.0/api/#flask.Request>
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>.
    Raises:
        ValueError: if the JSON body lacks a 'name' or the content type is
            not one of the four supported kinds.
    """
    content_type = request.headers['content-type']
    if content_type == 'application/json':
        request_json = request.get_json(silent=True)
        if request_json and 'name' in request_json:
            name = request_json['name']
        else:
            raise ValueError("JSON is invalid, or missing a 'name' property")
    elif content_type == 'application/octet-stream':
        # NOTE(review): request.data is raw bytes for these content types,
        # so escape() below receives bytes, not str -- confirm intended.
        name = request.data
    elif content_type == 'text/plain':
        name = request.data
    elif content_type == 'application/x-www-form-urlencoded':
        name = request.form.get('name')
    else:
        raise ValueError("Unknown content type: {}".format(content_type))
    return 'Hello {}!'.format(escape(name))
# [END functions_http_content]
# [START functions_http_methods]
def hello_method(request):
    """ Responds to a GET request with "Hello world!". Forbids a PUT request.
    Args:
        request (flask.Request): The request object.
    Returns:
        The greeting for GET; aborts with 403 for PUT and 405 otherwise.
    """
    from flask import abort
    method = request.method
    if method == 'GET':
        return 'Hello World!'
    if method == 'PUT':
        return abort(403)
    return abort(405)
# [END functions_http_methods]
def hello_error_1(request):
    """Demonstrates reported vs. fatal errors with the Error Reporting client."""
    # [START functions_helloworld_error]
    # This WILL be reported to Stackdriver Error
    # Reporting, and WILL NOT show up in logs or
    # terminate the function.
    from google.cloud import error_reporting
    client = error_reporting.Client()
    try:
        raise RuntimeError('I failed you')
    except RuntimeError:
        # report_exception() sends the active exception to Error Reporting
        # without re-raising, so execution continues past this handler.
        client.report_exception()
    # This WILL be reported to Stackdriver Error Reporting,
    # and WILL terminate the function
    raise RuntimeError('I failed you')
    # [END functions_helloworld_error]
def hello_error_2(request):
    """Demonstrates log-only error channels vs. an aborting 500 response."""
    # [START functions_helloworld_error]
    # WILL NOT be reported to Stackdriver Error Reporting, but will show up
    # in logs
    import logging
    print(RuntimeError('I failed you (print to stdout)'))
    # logging.warn() is a deprecated alias; logging.warning() is the
    # supported spelling and logs at the same WARNING level.
    logging.warning(RuntimeError('I failed you (logging.warn)'))
    logging.error(RuntimeError('I failed you (logging.error)'))
    sys.stderr.write('I failed you (sys.stderr.write)\n')
    # This WILL be reported to Stackdriver Error Reporting
    from flask import abort
    return abort(500)
    # [END functions_helloworld_error]
| 33.301508
| 78
| 0.681756
|
4a065a35f4902cd9eb7cc42d3f1d6a5149322a6f
| 555
|
py
|
Python
|
manage.py
|
intellisyscorp/django-project-template
|
320bb34ef31aee00af6a8c7793825c406c214ffd
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
intellisyscorp/django-project-template
|
320bb34ef31aee00af6a8c7793825c406c214ffd
|
[
"Apache-2.0"
] | 7
|
2019-07-22T04:46:03.000Z
|
2019-07-27T07:22:52.000Z
|
manage.py
|
intellisyscorp/django-project-template
|
320bb34ef31aee00af6a8c7793825c406c214ffd
|
[
"Apache-2.0"
] | 1
|
2019-07-22T04:49:39.000Z
|
2019-07-22T04:49:39.000Z
|
#!/usr/bin/env python
"""Django management entry point for the project template."""
import os
import sys
if __name__ == '__main__':
    # Point Django at the base settings module unless the caller already
    # selected one via the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', '{{ project_name }}.settings.base')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| 34.6875
| 87
| 0.682883
|
4a065a62d5025239c6168b4804bca762ccf97103
| 9,106
|
py
|
Python
|
src/gluonnlp/data/question_answering.py
|
StrayBird-ATSH/gluon-nlp
|
5dc6b9c9fab9e99b155554a50466c514b879ea84
|
[
"Apache-2.0"
] | null | null | null |
src/gluonnlp/data/question_answering.py
|
StrayBird-ATSH/gluon-nlp
|
5dc6b9c9fab9e99b155554a50466c514b879ea84
|
[
"Apache-2.0"
] | 1
|
2019-09-30T21:40:33.000Z
|
2019-09-30T21:40:33.000Z
|
src/gluonnlp/data/question_answering.py
|
StrayBird-ATSH/gluon-nlp
|
5dc6b9c9fab9e99b155554a50466c514b879ea84
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=
"""SQuAD dataset."""
__all__ = ['SQuAD']
import json
import os
import shutil
import zipfile
import tempfile
import uuid
from mxnet.gluon.data import ArrayDataset
from mxnet.gluon.utils import download, check_sha1, _get_repo_file_url, replace_file
from .registry import register
from ..base import get_home_dir
@register(segment=['train', 'dev'])
class SQuAD(ArrayDataset):
    """Stanford Question Answering Dataset (SQuAD) - reading comprehension dataset.
    From
    https://rajpurkar.github.io/SQuAD-explorer/
    License: CreativeCommons BY-SA 4.0
    The original data format is json, which has multiple contexts (a context is a paragraph of text
    from which questions are drawn). For each context there are multiple questions, and for each of
    these questions there are multiple (usually 3) answers.
    This class loads the json and flattens it to a table view. Each record is a single question.
    Since there are more than one question per context in the original dataset, some records shares
    the same context. Number of records in the dataset is equal to number of questions in json file.
    The format of each record of the dataset is following:
    - record_index: An index of the record, generated on the fly (0 ... to # of last question)
    - question_id: Question Id. It is a string and taken from the original json file as-is
    - question: Question text, taken from the original json file as-is
    - context: Context text. Will be the same for questions from the same context
    - answer_list: All answers for this question. Stored as python list
    - start_indices: All answers' starting indices. Stored as python list.
      The position in this list is the same as the position of an answer in answer_list
    - is_impossible: The question is unanswerable, if version is '2.0'.
      In SQuAd2.0, there are some unanswerable questions.
    Parameters
    ----------
    segment : str, default 'train'
        Dataset segment. Options are 'train' and 'dev'.
    version : str, default '1.1'
        Dataset version. Options are '1.1' and '2.0'.
    root : str, default '~/.mxnet/datasets/squad'
        Path to temp folder for storing data.
    Examples
    --------
    >>> squad = gluonnlp.data.SQuAD('dev', '1.1', root='./datasets/squad')
    -etc-
    >>> len(squad)
    10570
    >>> len(squad[0])
    6
    >>> tuple(type(squad[0][i]) for i in range(6))
    (<class 'int'>, <class 'str'>, <class 'str'>, <class 'str'>, <class 'list'>, <class 'list'>)
    >>> squad[0][0]
    0
    >>> squad[0][1]
    '56be4db0acb8001400a502ec'
    >>> squad[0][2]
    'Which NFL team represented the AFC at Super Bowl 50?'
    >>> squad[0][3][:70]
    'Super Bowl 50 was an American football game to determine the champion '
    >>> squad[0][4]
    ['Denver Broncos', 'Denver Broncos', 'Denver Broncos']
    >>> squad[0][5]
    [177, 177, 177]
    >>> squad2 = gluonnlp.data.SQuAD('dev', '2.0', root='./datasets/squad')
    -etc-
    >>> len(squad2)
    11873
    >>> len(squad2[0])
    7
    >>> type(squad2[0][6])
    <class 'bool'>
    >>> squad2[0][6]
    False
    """
    def __init__(self, segment='train', version='1.1',
                 root=os.path.join(get_home_dir(), 'datasets', 'squad')):
        # Map of version -> segment -> ((zip name, zip sha1), (json name, json sha1));
        # the sha1 hashes let _get_data skip downloading a valid local copy.
        self._data_file = {'1.1': {'train': (('train-v1.1.zip',
                                              '052a75bf8fdb3e843b8649971658eae8133f9b0e'),
                                             ('train-v1.1.json',
                                              '1faea1252438a64f9718412a55036b786cfcc636')),
                                   'dev': (('dev-v1.1.zip',
                                            'e31ad736582b72a8eabd5c0b0a38cb779ed913d7'),
                                           ('dev-v1.1.json',
                                            'e1621aae0683b346ee9743bd5609266ba0cc34fc'))},
                           '2.0': {'train': (('train-v2.0.zip',
                                              'fe497797fc090ee61a046b74eadfee51320b54fb'),
                                             ('train-v2.0.json',
                                              'ceb2acdea93b9d82ab1829c7b1e03bee9e302c99')),
                                   'dev': (('dev-v2.0.zip',
                                            'de4dad80b3de9194484ca013e95a96a3e2d5603f'),
                                           ('dev-v2.0.json',
                                            '846082d15ed71cb5220645b9d473441e00070778'))}}
        root = os.path.expanduser(root)
        if not os.path.isdir(root):
            os.makedirs(root)
        self._root = root
        self._segment = segment
        self._version = version
        self._get_data()
        super(SQuAD, self).__init__(SQuAD._get_records(self._read_data()))
    def _get_data(self):
        """Load data from the file. Does nothing if data was loaded before.
        """
        (data_archive_name, archive_hash), (data_name, data_hash) \
            = self._data_file[self._version][self._segment]
        data_path = os.path.join(self._root, data_name)
        if not os.path.exists(data_path) or not check_sha1(data_path, data_hash):
            # Download into a temp dir, then atomically move each extracted
            # member into place so concurrent readers never observe a
            # partially written file.
            with tempfile.TemporaryDirectory(dir=self._root) as temp_dir:
                file_path = download(_get_repo_file_url('gluon/dataset/squad', data_archive_name),
                                     path=temp_dir, sha1_hash=archive_hash)
                with zipfile.ZipFile(file_path, 'r') as zf:
                    for member in zf.namelist():
                        filename = os.path.basename(member)
                        if filename:
                            dest = os.path.join(self._root, filename)
                            temp_dst = dest + str(uuid.uuid4())
                            with zf.open(member) as source:
                                with open(temp_dst, 'wb') as target:
                                    shutil.copyfileobj(source, target)
                            replace_file(temp_dst, dest)
    def _read_data(self):
        """Read data.json from disk and flats it to the following format:
        Entry = (record_index, question_id, question, context, answer_list, answer_start_indices).
        Question id and list_of_answers also substituted with indices, so it could be later
        converted into nd.array
        Returns
        -------
        List[Tuple]
            Flatten list of questions
        """
        (_, _), (data_file_name, _) \
            = self._data_file[self._version][self._segment]
        with open(os.path.join(self._root, data_file_name)) as f:
            json_data = json.load(f)
        return json_data
    @staticmethod
    def _get_records(json_dict):
        """Provides a list of tuples with records where answers are flatten
        :param dict, json_dict: File content loaded into json dictionary
        Returns
        -------
        List[Tuple]
            Flatten list of records in format: record_index, question_id, question, context,
            answer, answer_start_index, is_impossible(if version is '2.0)
        """
        records = []
        record_index = 0
        for title in json_dict['data']:
            for paragraph in title['paragraphs']:
                for qas in paragraph['qas']:
                    answers = SQuAD._get_answers(qas)
                    # 'is_impossible' only exists in SQuAD 2.0 files; its
                    # presence selects the 7-field record layout.
                    is_impossible = qas.get('is_impossible', None)
                    if is_impossible is not None:
                        record = (
                            record_index, qas['id'], qas['question'],
                            paragraph['context'], answers[0], answers[1], is_impossible
                        )
                    else:
                        record = (
                            record_index, qas['id'], qas['question'],
                            paragraph['context'], answers[0], answers[1])
                    record_index += 1
                    records.append(record)
        return records
    @staticmethod
    def _get_answers(qas_dict):
        # Split the answer dicts into parallel (texts, start offsets) lists.
        answer_list = []
        answer_start_list = []
        for answer in qas_dict['answers']:
            answer_list.append(answer['text'])
            answer_start_list.append(answer['answer_start'])
        return answer_list, answer_start_list
| 40.292035
| 100
| 0.578849
|
4a065a8280ed4643d348f95fd8b9d957d1484af3
| 1,318
|
py
|
Python
|
hard-gists/77a1a4d5506258f3dc1f/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/77a1a4d5506258f3dc1f/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/77a1a4d5506258f3dc1f/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
#! /usr/bin/python
# Copyright (c) 2015, Rethink Robotics, Inc.
# Using this CvBridge Tutorial for converting
# ROS images to OpenCV2 images
# http://wiki.ros.org/cv_bridge/Tutorials/ConvertingBetweenROSImagesAndOpenCVImagesPython
# Using this OpenCV2 tutorial for saving Images:
# http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html
# rospy for the subscriber
import rospy
# ROS Image message
from sensor_msgs.msg import Image
# ROS Image message -> OpenCV2 image converter
from cv_bridge import CvBridge, CvBridgeError
# OpenCV2 for saving an image
import cv2
# Instantiate CvBridge
bridge = CvBridge()
def image_callback(msg):
    """Convert an incoming ROS Image message to OpenCV and save it as a JPEG."""
    print("Received an image!")
    try:
        # Convert your ROS Image message to OpenCV2
        cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        # 'except X as e' works on Python 2.6+ and Python 3, unlike the
        # old 'except X, e' form which is a syntax error on Python 3.
        print(e)
    else:
        # Save your OpenCV2 image as a jpeg
        cv2.imwrite('camera_image.jpeg', cv2_img)
def main():
    """Subscribe to the left-hand camera topic and process frames until shutdown."""
    rospy.init_node('image_listener')
    # Define your image topic
    image_topic = "/cameras/left_hand_camera/image"
    # Set up your subscriber and define its callback
    rospy.Subscriber(image_topic, Image, image_callback)
    # Spin until ctrl + c
    rospy.spin()
if __name__ == '__main__':
    # Only start the ROS node when executed as a script, not on import.
    main()
| 29.954545
| 116
| 0.73217
|
4a065ac3540ad2f19fe2d47acc3ea6321945bed6
| 458
|
py
|
Python
|
corpora_creation/noxfile.py
|
sofignatova/02books
|
9eed066fee5503c88359958708dfb8eba56e465a
|
[
"MIT"
] | 38
|
2020-12-22T01:15:38.000Z
|
2021-11-09T11:01:40.000Z
|
corpora_creation/noxfile.py
|
sofignatova/02books
|
9eed066fee5503c88359958708dfb8eba56e465a
|
[
"MIT"
] | 1
|
2020-12-21T19:11:11.000Z
|
2020-12-21T19:11:11.000Z
|
server/noxfile.py
|
sofignatova/02books
|
9eed066fee5503c88359958708dfb8eba56e465a
|
[
"MIT"
] | 3
|
2020-12-22T04:17:50.000Z
|
2020-12-22T09:03:37.000Z
|
"""Nox config for running lint and unit tests."""
import nox
@nox.session
def lint(session):
    """Fail the session when black would reformat any file."""
    session.install("black")
    session.run("black", "--check", ".")
@nox.session
def unit(session):
    """Run the pytest suite, forwarding any extra CLI arguments."""
    session.install("-r", "requirements-test.txt")
    extra_args = session.posargs
    session.run("py.test", "--quiet", *extra_args)
@nox.session
def type_check(session):
    """Type-check the whole tree with mypy."""
    session.install("-r", "requirements-test.txt")
    session.run("mypy", ".")
| 17.615385
| 55
| 0.606987
|
4a065ba5d727ca80b397b6291531d490a27091c7
| 10,113
|
py
|
Python
|
nova/pci/stats.py
|
bopopescu/nova-17
|
2724155f4ac64aa0ef7dc25c1bf38d3c41f95b7b
|
[
"Apache-2.0"
] | 4
|
2015-04-13T14:52:41.000Z
|
2018-02-03T19:32:13.000Z
|
nova/pci/stats.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
nova/pci/stats.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | 2
|
2016-06-13T09:26:10.000Z
|
2020-07-24T01:23:26.000Z
|
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from nova import exception
from nova.i18n import _LE
from nova.pci import utils
from nova.pci import whitelist
LOG = logging.getLogger(__name__)
class PciDeviceStats(object):
"""PCI devices summary information.
According to the PCI SR-IOV spec, a PCI physical function can have up to
256 PCI virtual functions, thus the number of assignable PCI functions in
a cloud can be big. The scheduler needs to know all device availability
information in order to determine which compute hosts can support a PCI
request. Passing individual virtual device information to the scheduler
does not scale, so we provide summary information.
Usually the virtual functions provided by a host PCI device have the same
value for most properties, like vendor_id, product_id and class type.
The PCI stats class summarizes this information for the scheduler.
The pci stats information is maintained exclusively by compute node
resource tracker and updated to database. The scheduler fetches the
information and selects the compute node accordingly. If a comptue
node is selected, the resource tracker allocates the devices to the
instance and updates the pci stats information.
This summary information will be helpful for cloud management also.
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node']
    def __init__(self, stats=None):
        """Build the pool list from an optional iterable of PCI pool objects."""
        super(PciDeviceStats, self).__init__()
        # NOTE(sbauza): Stats are a PCIDevicePoolList object
        self.pools = [pci_pool.to_dict()
                      for pci_pool in stats] if stats else []
        # NOTE(review): list.sort() is given a comparison function here
        # (Python 2 style); pool_cmp is presumably defined elsewhere in
        # this class -- confirm before any Python 3 port.
        self.pools.sort(self.pool_cmp)
def _equal_properties(self, dev, entry, matching_keys):
return all(dev.get(prop) == entry.get(prop)
for prop in matching_keys)
def _find_pool(self, dev_pool):
"""Return the first pool that matches dev."""
for pool in self.pools:
pool_keys = pool.copy()
del pool_keys['count']
del pool_keys['devices']
if (len(pool_keys.keys()) == len(dev_pool.keys()) and
self._equal_properties(dev_pool, pool_keys, dev_pool.keys())):
return pool
    def _create_pool_keys_from_dev(self, dev):
        """create a stats pool dict that this dev is supposed to be part of

        Note that this pool dict contains the stats pool's keys and their
        values. 'count' and 'devices' are not included.
        """
        # Don't add a device that doesn't have a matching device spec.
        # This can happen during initial sync up with the controller
        devspec = whitelist.get_pci_device_devspec(dev)
        if not devspec:
            # Implicitly returns None; callers treat that as "skip device".
            return
        tags = devspec.get_tags()
        # Base identity: product_id / vendor_id / numa_node (see pool_keys).
        pool = {k: dev.get(k) for k in self.pool_keys}
        if tags:
            # Whitelist tags extend (and may override) the base identity.
            pool.update(tags)
        return pool
def add_device(self, dev):
"""Add a device to its matching pool."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
dev_pool['count'] = 0
dev_pool['devices'] = []
self.pools.append(dev_pool)
self.pools.sort(self.pool_cmp)
pool = dev_pool
pool['count'] += 1
pool['devices'].append(dev)
@staticmethod
def _decrease_pool_count(pool_list, pool, count=1):
"""Decrement pool's size by count.
If pool becomes empty, remove pool from pool_list.
"""
if pool['count'] > count:
pool['count'] -= count
count = 0
else:
count -= pool['count']
pool_list.remove(pool)
return count
def remove_device(self, dev):
"""Remove one device from the first pool that it matches."""
dev_pool = self._create_pool_keys_from_dev(dev)
if dev_pool:
pool = self._find_pool(dev_pool)
if not pool:
raise exception.PciDevicePoolEmpty(
compute_node_id=dev.compute_node_id, address=dev.address)
pool['devices'].remove(dev)
self._decrease_pool_count(self.pools, pool)
def get_free_devs(self):
free_devs = []
for pool in self.pools:
free_devs.extend(pool['devices'])
return free_devs
def consume_requests(self, pci_requests, numa_cells=None):
alloc_devices = []
for request in pci_requests:
count = request.count
spec = request.spec
# For now, keep the same algorithm as during scheduling:
# a spec may be able to match multiple pools.
pools = self._filter_pools_for_spec(self.pools, spec)
if numa_cells:
pools = self._filter_pools_for_numa_cells(pools, numa_cells)
# Failed to allocate the required number of devices
# Return the devices already allocated back to their pools
if sum([pool['count'] for pool in pools]) < count:
LOG.error(_LE("Failed to allocate PCI devices for instance."
" Unassigning devices back to pools."
" This should not happen, since the scheduler"
" should have accurate information, and allocation"
" during claims is controlled via a hold"
" on the compute node semaphore"))
for d in range(len(alloc_devices)):
self.add_device(alloc_devices.pop())
raise exception.PciDeviceRequestFailed(requests=pci_requests)
for pool in pools:
if pool['count'] >= count:
num_alloc = count
else:
num_alloc = pool['count']
count -= num_alloc
pool['count'] -= num_alloc
for d in range(num_alloc):
pci_dev = pool['devices'].pop()
pci_dev.request_id = request.request_id
alloc_devices.append(pci_dev)
if count == 0:
break
return alloc_devices
@staticmethod
def _filter_pools_for_spec(pools, request_specs):
return [pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)]
@staticmethod
def _filter_pools_for_numa_cells(pools, numa_cells):
# Some systems don't report numa node info for pci devices, in
# that case None is reported in pci_device.numa_node, by adding None
# to numa_cells we allow assigning those devices to instances with
# numa topology
numa_cells = [None] + [cell.id for cell in numa_cells]
# filter out pools which numa_node is not included in numa_cells
return [pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}])
for cell in numa_cells)]
def _apply_request(self, pools, request, numa_cells=None):
count = request.count
matching_pools = self._filter_pools_for_spec(pools, request.spec)
if numa_cells:
matching_pools = self._filter_pools_for_numa_cells(matching_pools,
numa_cells)
if sum([pool['count'] for pool in matching_pools]) < count:
return False
else:
for pool in matching_pools:
count = self._decrease_pool_count(pools, pool, count)
if not count:
break
return True
def support_requests(self, requests, numa_cells=None):
"""Check if the pci requests can be met.
Scheduler checks compute node's PCI stats to decide if an
instance can be scheduled into the node. Support does not
mean real allocation.
If numa_cells is provided then only devices contained in
those nodes are considered.
"""
# note (yjiang5): this function has high possibility to fail,
# so no exception should be triggered for performance reason.
pools = copy.deepcopy(self.pools)
return all([self._apply_request(pools, r, numa_cells)
for r in requests])
def apply_requests(self, requests, numa_cells=None):
"""Apply PCI requests to the PCI stats.
This is used in multiple instance creation, when the scheduler has to
maintain how the resources are consumed by the instances.
If numa_cells is provided then only devices contained in
those nodes are considered.
"""
if not all([self._apply_request(self.pools, r, numa_cells)
for r in requests]):
raise exception.PciDeviceRequestFailed(requests=requests)
@staticmethod
def pool_cmp(dev1, dev2):
return len(dev1) - len(dev2)
def __iter__(self):
# 'devices' shouldn't be part of stats
pools = []
for pool in self.pools:
tmp = {k: v for k, v in pool.iteritems() if k != 'devices'}
pools.append(tmp)
return iter(pools)
def clear(self):
"""Clear all the stats maintained."""
self.pools = []
| 40.290837
| 78
| 0.614061
|
4a065bf785d2324ae78b8ef7edfff6abd1c120d3
| 9,351
|
py
|
Python
|
parlai/agents/ir_baseline/ir_baseline.py
|
min942773/parlai_wandb
|
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
|
[
"MIT"
] | 2
|
2017-09-20T21:49:51.000Z
|
2018-08-12T06:58:10.000Z
|
parlai/agents/ir_baseline/ir_baseline.py
|
min942773/parlai_wandb
|
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
|
[
"MIT"
] | 7
|
2021-01-12T01:07:03.000Z
|
2022-03-12T00:50:45.000Z
|
parlai/agents/ir_baseline/ir_baseline.py
|
min942773/parlai_wandb
|
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
|
[
"MIT"
] | 1
|
2021-01-07T11:45:03.000Z
|
2021-01-07T11:45:03.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Simple IR baselines.
We plan to implement the following variants:
Given an input message, either:
(i) find the most similar message in the (training) dataset and output the
response from that exchange; or
(ii) find the most similar response to the input directly.
(iii) if label_candidates are provided, simply ranks them according to their
similarity to the input message.
Currently only (iii) is used.
Additionally, TFIDF is either used (requires building a dictionary) or not,
depending on whether you train on the train set first, or not.
"""
import math
from collections.abc import Sequence
import heapq
import json
import parlai.utils.torch as torch_utils
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from parlai.utils.io import PathManager
class MaxPriorityQueue(Sequence):
"""
Fixed-size priority queue keeping the max_size largest items.
"""
def __init__(self, max_size):
"""
Initialize priority queue.
:param max_size: maximum capacity of priority queue.
"""
self.capacity = max_size
self.lst = []
def add(self, item, priority):
"""
Add element to the queue, with a separate priority if desired.
Element will not be added if the queue is at capacity and the element
has lower priority than the lowest currently in the queue.
:param item:
item to add to queue.
:param priority:
priority to use for item.
"""
if len(self.lst) < self.capacity:
heapq.heappush(self.lst, (priority, item))
elif priority > self.lst[0][0]:
heapq.heapreplace(self.lst, (priority, item))
def __getitem__(self, key):
"""
Get item at specified index.
:param key: integer index into priority queue, 0 <= index < max_size.
:returns: item stored at the specified index.
"""
return sorted(self.lst)[key][1]
def __len__(self):
"""
Return length of priority queue.
"""
return len(self.lst)
stopwords = {
'i',
'a',
'an',
'are',
'about',
'as',
'at',
'be',
'by',
'for',
'from',
'how',
'in',
'is',
'it',
'of',
'on',
'or',
'that',
'the',
'this',
'to',
'was',
'what',
'when',
'where',
'--',
'?',
'.',
"''",
"''",
"``",
',',
'do',
'see',
'want',
'people',
'and',
"n't",
"me",
'too',
'own',
'their',
'*',
"'s",
'not',
'than',
'other',
'you',
'your',
'know',
'just',
'but',
'does',
'really',
'have',
'into',
'more',
'also',
'has',
'any',
'why',
'will',
}
def score_match(query_rep, text, length_penalty, dictionary=None):
"""
Calculate the score match between the query representation the text.
:param query_rep:
base query representation to match text again.
:param text:
string to compare against query_rep for matching tokens
:param length_penalty:
scores are divided by the norm taken to this power
:param dictionary:
optional dictionary to use to tokenize text
:returns:
float score of match
"""
if text == "":
return 0
words = [w for w in dictionary.tokenize(text.lower())]
score = 0
rw = query_rep['words']
used = {}
for w in words:
if w in rw and w not in used:
score += rw[w]
used[w] = True
norm = math.sqrt(len(used))
norm = math.pow(norm * query_rep['norm'], length_penalty)
if norm > 1:
score /= norm
return score
def rank_candidates(query_rep, cands, length_penalty, dictionary):
"""
Rank candidates given representation of query.
:param query_rep:
base query representation to match text again.
:param cands:
strings to compare against query_rep for matching tokens
:param length_penalty:
scores are divided by the norm taken to this power
:dictionary:
dictionary to use to tokenize text
:returns:
ordered list of candidate strings in score-ranked order
"""
if True:
mpq = MaxPriorityQueue(100)
for c in cands:
score = score_match(query_rep, c, length_penalty, dictionary)
mpq.add(c, score)
return list(reversed(mpq))
else:
cands = list(cands)
score = [0] * len(cands)
for i, c in enumerate(cands):
score[i] = -score_match(query_rep, c, length_penalty, dictionary)
r = [i[0] for i in sorted(enumerate(score), key=lambda x: x[1])]
res = []
for i in range(min(100, len(score))):
res.append(cands[r[i]])
return res
class IrBaselineAgent(Agent):
"""
Information Retrieval baseline.
"""
@staticmethod
def add_cmdline_args(parser):
"""
Add command line args specific to this agent.
"""
parser = parser.add_argument_group('IrBaseline Arguments')
parser.add_argument(
'-lp',
'--length_penalty',
type=float,
default=0.5,
help='length penalty for responses',
)
parser.add_argument(
'-hsz',
'--history_size',
type=int,
default=1,
help='number of utterances from the dialogue history to take use '
'as the query',
)
parser.add_argument(
'--label_candidates_file',
type=str,
default=None,
help='file of candidate responses to choose from',
)
DictionaryAgent.add_cmdline_args(parser)
def __init__(self, opt, shared=None):
"""
Initialize agent.
"""
super().__init__(opt)
self.id = 'IRBaselineAgent'
self.length_penalty = float(opt['length_penalty'])
self.dictionary = DictionaryAgent(opt)
self.opt = opt
self.history = []
self.episodeDone = True
if opt.get('label_candidates_file'):
f = open(opt.get('label_candidates_file'))
self.label_candidates = f.read().split('\n')
def reset(self):
"""
Reset agent properties.
"""
self.observation = None
self.history = []
self.episodeDone = True
def observe(self, obs):
"""
Store and remember incoming observation message dict.
"""
self.observation = obs
self.dictionary.observe(obs)
if self.episodeDone:
self.history = []
if 'text' in obs:
self.history.append(obs.get('text', ''))
self.episodeDone = obs.get('episode_done', False)
return obs
def act(self):
"""
Generate a response to the previously seen observation(s).
"""
if self.opt.get('datatype', '').startswith('train'):
self.dictionary.act()
obs = self.observation
reply = {}
reply['id'] = self.getID()
# Rank candidates
cands = None
if obs.get('label_candidates', False) and len(obs['label_candidates']) > 0:
cands = obs['label_candidates']
if hasattr(self, 'label_candidates'):
# override label candidates with candidate file if set
cands = self.label_candidates
if cands:
hist_sz = self.opt.get('history_size', 1)
left_idx = max(0, len(self.history) - hist_sz)
text = ' '.join(self.history[left_idx : len(self.history)])
rep = self.build_query_representation(text)
reply['text_candidates'] = rank_candidates(
rep, cands, self.length_penalty, self.dictionary
)
reply['text'] = reply['text_candidates'][0]
return reply
def save(self, path=None):
"""
Save dictionary tokenizer if available.
"""
path = self.opt.get('model_file', None) if path is None else path
if path:
self.dictionary.save(path + '.dict')
data = {}
data['opt'] = self.opt
torch_utils.atomic_save(data, path)
with PathManager.open(path + '.opt', 'w') as handle:
json.dump(self.opt, handle)
def build_query_representation(self, query):
"""
Build representation of query, e.g. words or n-grams.
:param query: string to represent.
:returns: dictionary containing 'words' dictionary (token => frequency)
and 'norm' float (square root of the number of tokens)
"""
rep = {}
rep['words'] = {}
words = [w for w in self.dictionary.tokenize(query.lower())]
rw = rep['words']
used = {}
for w in words:
assert len(self.dictionary.freq) > 0
rw[w] = 1.0 / (1.0 + math.log(1.0 + self.dictionary.freq[w]))
used[w] = True
rep['norm'] = math.sqrt(len(words))
return rep
| 26.87069
| 83
| 0.566357
|
4a065cfcfeaddb60c8b5b107f73c69eda2fb650e
| 9,968
|
py
|
Python
|
users.py
|
Jon-LaFlamme/cribbage
|
a74adeb4d3ebda0ec36d6612eab4faa9a160c0fd
|
[
"MIT"
] | null | null | null |
users.py
|
Jon-LaFlamme/cribbage
|
a74adeb4d3ebda0ec36d6612eab4faa9a160c0fd
|
[
"MIT"
] | null | null | null |
users.py
|
Jon-LaFlamme/cribbage
|
a74adeb4d3ebda0ec36d6612eab4faa9a160c0fd
|
[
"MIT"
] | null | null | null |
#TODO(Jon) This module will require online components for production, but will be locally implemented in this package
import copy
import json
import os
MATCH_TEMPLATE = {'win': 0, 'was_skunked': 0, 'was_dbl_skunked': 0, 'skunked_opponent': 0, 'dbl_skunked_opponent': 0}
DIFFICULTY_MAP = {'beginner': 1, 'intermediate': 2, 'expert': 3}
GAME_MODES = {'vs_humans','computer_easy','computer_med','computer_hard'}
BADGES = {'win_streak_3','hand_of_eight','hand_of_twelve','hand_of_sixteen','hand_of_twenty',
'hand_of_twenty-four','hand_of_twenty-eight','hand_of_twenty-nine','peg_five',
'peg_eight','peg_twelve','three_skunks','three_dbl_skunks','rank_status'}
PROFILE_TEMPLATE = {'email': 'none',
'rank': 0,
'credits': 0,
'badges': {'win_streak_3': 0, #0: not achieved, #1 achieved on easy, #2 achieved on medium, #3 achieved on hard
'hand_of_eight': 0,
'hand_of_twelve': 0,
'hand_of_sixteen': 0,
'hand_of_twenty': 0,
'hand_of_twenty-four': 0,
'hand_of_twenty-eight': 0,
'hand_of_twenty-nine': 0,
'peg_five': 0,
'peg_eight': 0,
'peg_twelve': 0,
'three_skunks': 0,
'three_dbl_skunks': 0,
'rank_status': 0,}, #0: beginner, #1: intermediate, #2: advanced, #3: elite
'unlocked_boards': {'classic_1': 0,'ultimate_1': 0}, #0: not won, #1 won on easy, #2 won on medium, #3 won on hard
'vs_humans': {'skunks':0,'skunked':0,'dbl_skunks':0,'dbl_skunked':0,'wins':0,'losses':0},
'computer_beginner': {'skunks':0,'skunked':0,'dbl_skunks':0,'dbl_skunked':0,'wins':0,'losses':0},
'computer_intermediate': {'skunks':0,'skunked':0,'dbl_skunks':0,'dbl_skunked':0,'wins':0,'losses':0},
'computer_expert': {'skunks':0,'skunked':0,'dbl_skunks':0,'dbl_skunked':0,'wins':0,'losses':0}
}
#returns the new or existing user after successfull sign-in or new profile created
def sign_in():
invalid = True
while invalid:
print('\n ======== User Sign-In ======== \n')
print('1) Sign-in to an existing account.')
print('2) Create a new account.')
selection = int(input('Make a selection: '))
if selection == 1:
while invalid:
username = input('\nEnter your username: ').lower()
email = input('Enter your email: ').lower()
feedback = lookup_user(username=username, email=email)
if feedback == 'fna':
print('email does not match username.')
option = input('Enter 0 to return to menu. Any other key to try again:')
if option == 0:
break
if feedback == 'fa':
u = User(username=username, email=email)
print('Loading profile.')
return u
elif selection == 2:
while invalid:
username = input('\nCreate a username: ').lower()
email = input('Enter your email: ').lower()
feedback = lookup_user(username=username, email=email)
if feedback == 'nf':
add_user(username=username, email=email)
u = User(username=username, email=email)
print('User profile created.')
return u
else:
print(f'username: {username} is already claimed. Please try again.')
else:
print('Invalid selection. Please try again.')
#Found but not authenticated: 'fna', Found and authenticated: 'fa', Not found: 'nf'
def lookup_user(username=None, email=None):
with open('user_directory.json','r') as f:
user_directory = json.load(f)
if username in user_directory:
if user_directory[username]['email'] == email:
return 'fa'
else:
return 'fna'
else:
return 'nf'
def add_user(username=None, email=None):
with open('user_directory.json','r') as f:
user_directory = json.load(f)
user_directory[username] = {'email': email, 'rank': 0}
with open('user_directory.json', 'w') as f:
json.dump(user_directory, f)
class User():
def __init__(self, username=None, email=None):
self.name = username
self.match_stats = MATCH_TEMPLATE
if os.path.exists(f'{self.name}.json'):
with open(f'{self.name}.json','r') as f:
self.profile = json.load(f)
else:
self.profile = {username: PROFILE_TEMPLATE}
self.profile[username]['email'] = email
with open(f'{self.name}.json', 'w') as f:
json.dump(self.profile, f)
def add_badge(self, badge=None, difficulty=None):
if badge in BADGES and difficulty in DIFFICULTY_MAP:
self.profile[self.name]['badges'][badge] = DIFFICULTY_MAP[difficulty]
def new_credits_calculator(self):
#TODO(Jon) Create function that calculates the new credits awarded a user after achieving various tasks. Done once per game at end.
#Requires a credits dictionary mapping credit value for various achievements
#In-app purchases can also purchase credits
pass
def add_credits(self, credits=None):
self.profile[self.name]['credits'] += credits
def update_unlocked_boards(self, board=None, difficulty=None):
if difficulty in DIFFICULTY_MAP:
value = DIFFICULTY_MAP[difficulty]
if board in self.profile[self.name]['unlocked_boards']:
#Only overwrite old scores if achieved at a greater difficulty level
if value > self.profile[self.name]['unlocked_boards'][board]:
self.profile[self.name]['unlocked_boards'][board] = value
else: self.profile[self.name]['unlocked_boards'][board] = value
def compute_new_rank(self):
rank = self.profile[self.name]['rank']
outcome = 0
penalty = 0
bonus = 0
if rank < 1000:
weighted_gain = 100
weighted_loss = 50
elif rank < 2000:
weighted_gain = 75
weighted_loss = 50
elif rank < 3000:
weighted_gain = 50
weighted_loss = 50
else:
weighted_gain = 25
weighted_loss = 50
if self.match_stats['win'] == 1:
outcome += weighted_gain
else:
outcome -= weighted_loss
if self.match_stats['was_skunked'] == 1:
penalty = 50
elif self.match_stats['was_dbl_skunked'] == 1:
penalty = 100
elif self.match_stats['skunked_opponent'] == 1:
bonus = 50
elif self.match_stats['dbl_skunked_opponent'] == 1:
bonus = 100
return rank + outcome + bonus - penalty
def update_profile(self, game_mode=None):
if game_mode in GAME_MODES:
#stats to update: {'skunks':0,'skunked':0,'dbl_skunks':0,'dbl_skunked':0,'wins':0,'losses':0}
self.profile[self.name][game_mode]['skunks'] += self.match_stats['skunked_opponent']
self.profile[self.name][game_mode]['skunked'] += self.match_stats['was_skunked']
self.profile[self.name][game_mode]['dbl_skunks'] += self.match_stats['dbl_skunked_opponent']
self.profile[self.name][game_mode]['dbl_skunked'] += self.match_stats['was_dbl_skunked']
if self.match_stats['win'] == 1:
self.profile[self.name][game_mode]['wins'] += 1
else:
self.profile[self.name][game_mode]['losses'] += 1
self.profile[self.name]['rank'] = self.compute_new_rank()
def save_updated_profile(self):
with open(f'{self.name}.json', 'w') as f:
json.dump(self.profile, f)
def display_stats(self):
rank = self.profile[self.name]['rank']
credits = self.profile[self.name]['credits']
badges = self.profile[self.name]['badges']
boards = self.profile[self.name]['unlocked_boards']
wins = self.profile[self.name]['vs_humans']['wins']
losses = self.profile[self.name]['vs_humans']['wins']
skunks = self.profile[self.name]['vs_humans']['skunks']
skunked = self.profile[self.name]['vs_humans']['skunked']
dbl_skunks = self.profile[self.name]['vs_humans']['dbl_skunks']
dbl_skunked = self.profile[self.name]['vs_humans']['dbl_skunked']
easy = self.profile[self.name]['computer_easy']
medium = self.profile[self.name]['computer_med']
hard = self.profile[self.name]['computer_hard']
print(f'======== Player stats for {self.name} ========\n')
print(f'Rank: {rank}')
print(f'Credits: {credits}')
print(f'Badges: {badges}')
print(f'Boards unlocked: {boards}')
print('============================================== \n')
print(' Versus Humans \n')
print(f'WINS: {wins}')
print(f'LOSSES: {losses}')
print(f'SKUNKS: {skunks}')
print(f'SKUNKED: {skunked}')
print(f'DOUBLE SKUNKS: {dbl_skunks}')
print(f'DOUBLE SKUNKED: {dbl_skunked}')
print('============================================== \n')
print(' Versus Computer \n')
print(f'BEGINNER: {easy}')
print(f'INTERMEDIATE: {medium}')
print(f'EXPERT: {hard}')
| 45.935484
| 139
| 0.547251
|
4a065cfd680c545b5f2bbb15a94d396634d98162
| 798
|
py
|
Python
|
Projects/Online Workouts/w3resource/Basic - Part-I/program-49.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 1
|
2019-09-23T15:51:45.000Z
|
2019-09-23T15:51:45.000Z
|
Projects/Online Workouts/w3resource/Basic - Part-I/program-49.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 5
|
2021-02-08T20:47:19.000Z
|
2022-03-12T00:35:44.000Z
|
Projects/Online Workouts/w3resource/Basic - Part-I/program-49.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
###################################################################################
# #
# Program purpose: List all files in a directory. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : August 9, 2019 #
# #
###################################################################################
if __name__ == "__main__":
from os import listdir
from os.path import isfile, join
files_list = [f for f in listdir("./") if isfile(join("./", f))]
print(files_list)
| 46.941176
| 83
| 0.295739
|
4a065d74c45dc34c155525d4dc16548d9ebf08f9
| 2,102
|
py
|
Python
|
src/cli/scanner.py
|
my-tradingbot/scanner
|
ed932ed593aebe00022a6de213589b77ccff114d
|
[
"MIT"
] | null | null | null |
src/cli/scanner.py
|
my-tradingbot/scanner
|
ed932ed593aebe00022a6de213589b77ccff114d
|
[
"MIT"
] | null | null | null |
src/cli/scanner.py
|
my-tradingbot/scanner
|
ed932ed593aebe00022a6de213589b77ccff114d
|
[
"MIT"
] | null | null | null |
import argparse
import os
import alpaca_trade_api as tradeapi
from common import BaseClient
"""
Create a new login session by setting environmental variables or using the login command
$ export ALPACAURL="https://paper-api.alpaca.markets"
$ export ALPACATOKENID="token-id"
$ export ALPACATOKENSECRET="token-secret"
or
python3 scanner.py -url "https://paper-api.alpaca.markets" -tokenid "token-id" -tokensecret "token-secret"
"""
class Client(BaseClient):
def query(self, suffix):
res = self._http_request(
method='Get',
url_suffix='/v2/'+suffix
)
return res
def test_api(client):
try:
market_clock = client.query('clock')
return f'Test is Successful,{market_clock} '
except ConnectionError as err_msg:
raise ConnectionError(err_msg)
def main():
parser = argparse.ArgumentParser(description="ALPACA API Login Session Parameters")
parser.add_argument('-url', dest='url', help='API URL')
parser.add_argument('-tokenid', dest='tokenid', help='API Token ID')
parser.add_argument('-tokensecret', dest='tokensecret', help='API Token Secret')
args = parser.parse_args()
if args.url and args.tokenid and args.tokensecret:
url, token_id, token_secret = args.url,args.tokenid,args.tokensecret
else:
try:
url = os.environ["ALPACAURL"]
token_id = os.environ["ALPACATOKENID"]
token_secret = os.environ["ALPACATOKENSECRET"]
except:
raise ValueError("No enviromental variables configured, please rerun the scanner with API credentials")
headers = {
"APCA-API-KEY-ID": token_id,
"APCA-API-SECRET-KEY": token_secret
}
try:
client = Client(
base_url=url,
verify=False,
headers=headers,
ok_codes=(200, 201, 204),
)
except Exception as e:
raise (str(f'Failed to execute your command. Error: {str(e)}'))
print(test_api(client))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 28.026667
| 115
| 0.641294
|
4a065dbdb8f95936dc62a9d6987b3b1c7bf05d9e
| 1,338
|
py
|
Python
|
python_sources/slave_node/slave.py
|
BPChain/private-multichain
|
e81354996f220b27a34523db9f584f6864cac548
|
[
"MIT"
] | 5
|
2018-01-31T12:20:24.000Z
|
2021-08-15T16:30:55.000Z
|
python_sources/slave_node/slave.py
|
BPChain/private-multichain
|
e81354996f220b27a34523db9f584f6864cac548
|
[
"MIT"
] | 1
|
2021-06-01T22:10:17.000Z
|
2021-06-01T22:10:17.000Z
|
python_sources/slave_node/slave.py
|
BPChain/private-multichain
|
e81354996f220b27a34523db9f584f6864cac548
|
[
"MIT"
] | null | null | null |
"""I send my multichain username and password and my ip to a master node so he can control my local
multichain instance """
import http.client
import json
import socket
from time import sleep
from ..data_acquisition.multichain_connector import read_user_and_password, read_rpc_port
from ..project_logger import set_up_logging
LOG = set_up_logging(__name__)
def get_credentials():
user, password = read_user_and_password()
return user, password, read_rpc_port()
def send_credentials():
credentials_sent = False
while not credentials_sent:
try:
conn = http.client.HTTPConnection('masternode', 60000)
headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
user, password, rpc_port = get_credentials()
credentials = {'user': user,
'password': password,
'host': socket.gethostbyname(socket.gethostname()),
'rpc_port': rpc_port}
json_data = json.dumps(credentials)
conn.request('POST', '/post', json_data, headers)
credentials_sent = True
except Exception as error:
LOG.error(error)
sleep(5)
if __name__ == '__main__':
sleep(10)
send_credentials()
sleep(60)
| 30.409091
| 99
| 0.62855
|
4a065de662134cf8329c5fb7c6ad3ce5cb71dc1d
| 37,275
|
py
|
Python
|
tests/test_imapclient.py
|
BoniLindsley/imapclient
|
8ececac1c7aa4d7eb5c8846c88e8c41ce08c6340
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_imapclient.py
|
BoniLindsley/imapclient
|
8ececac1c7aa4d7eb5c8846c88e8c41ce08c6340
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_imapclient.py
|
BoniLindsley/imapclient
|
8ececac1c7aa4d7eb5c8846c88e8c41ce08c6340
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2014, Menno Smits
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
from __future__ import unicode_literals
import itertools
import socket
import sys
from datetime import datetime
import logging
import six
from select import POLLIN
from imapclient.exceptions import CapabilityError, IMAPClientError, ProtocolError
from imapclient.imapclient import (
IMAPlibLoggerAdapter,
_parse_quota,
Quota,
MailboxQuotaRoots,
require_capability,
_literal,
)
from imapclient.fixed_offset import FixedOffset
from imapclient.testable_imapclient import TestableIMAPClient as IMAPClient
from .imapclient_test import IMAPClientTest
from .util import patch, sentinel, Mock
class TestListFolders(IMAPClientTest):
def test_list_folders(self):
self.client._imap._simple_command.return_value = ("OK", [b"something"])
self.client._imap._untagged_response.return_value = (
"LIST",
sentinel.folder_data,
)
self.client._proc_folder_list = Mock(return_value=sentinel.folder_list)
folders = self.client.list_folders("foo", "bar")
self.client._imap._simple_command.assert_called_once_with(
"LIST", b'"foo"', b'"bar"'
)
self.assertEqual(
self.client._proc_folder_list.call_args, ((sentinel.folder_data,), {})
)
self.assertTrue(folders is sentinel.folder_list)
def test_list_sub_folders(self):
self.client._imap._simple_command.return_value = ("OK", [b"something"])
self.client._imap._untagged_response.return_value = (
"LSUB",
sentinel.folder_data,
)
self.client._proc_folder_list = Mock(return_value=sentinel.folder_list)
folders = self.client.list_sub_folders("foo", "bar")
self.client._imap._simple_command.assert_called_once_with(
"LSUB", b'"foo"', b'"bar"'
)
self.assertEqual(
self.client._proc_folder_list.call_args, ((sentinel.folder_data,), {})
)
self.assertTrue(folders is sentinel.folder_list)
def test_list_folders_NO(self):
self.client._imap._simple_command.return_value = ("NO", [b"badness"])
self.assertRaises(IMAPClientError, self.client.list_folders)
def test_list_sub_folders_NO(self):
self.client._imap._simple_command.return_value = ("NO", [b"badness"])
self.assertRaises(IMAPClientError, self.client.list_folders)
def test_utf7_decoding(self):
self.client._imap._simple_command.return_value = ("OK", [b"something"])
self.client._imap._untagged_response.return_value = (
"LIST",
[
b'(\\HasNoChildren) "/" "A"',
b'(\\HasNoChildren) "/" "Hello&AP8-world"',
],
)
folders = self.client.list_folders("foo", "bar")
self.client._imap._simple_command.assert_called_once_with(
"LIST", b'"foo"', b'"bar"'
)
self.assertEqual(
folders,
[
((b"\\HasNoChildren",), b"/", "A"),
((b"\\HasNoChildren",), b"/", "Hello\xffworld"),
],
)
def test_folder_encode_off(self):
self.client.folder_encode = False
self.client._imap._simple_command.return_value = ("OK", [b"something"])
self.client._imap._untagged_response.return_value = (
"LIST",
[
b'(\\HasNoChildren) "/" "A"',
b'(\\HasNoChildren) "/" "Hello&AP8-world"',
],
)
folders = self.client.list_folders("foo", "bar")
self.client._imap._simple_command.assert_called_once_with(
"LIST", '"foo"', '"bar"'
)
self.assertEqual(
folders,
[
((b"\\HasNoChildren",), b"/", b"A"),
((b"\\HasNoChildren",), b"/", b"Hello&AP8-world"),
],
)
def test_simple(self):
folders = self.client._proc_folder_list(
[
b'(\\HasNoChildren) "/" "A"',
b'(\\HasNoChildren) "/" "Foo Bar"',
]
)
self.assertEqual(
folders,
[
(
(b"\\HasNoChildren",),
b"/",
"A",
),
((b"\\HasNoChildren",), b"/", "Foo Bar"),
],
)
def test_without_quotes(self):
folders = self.client._proc_folder_list(
[
b'(\\HasNoChildren) "/" A',
b'(\\HasNoChildren) "/" B',
b'(\\HasNoChildren) "/" C',
]
)
self.assertEqual(
folders,
[
((b"\\HasNoChildren",), b"/", "A"),
((b"\\HasNoChildren",), b"/", "B"),
((b"\\HasNoChildren",), b"/", "C"),
],
)
def test_unquoted_numeric_folder_name(self):
# Some IMAP implementations do this
folders = self.client._proc_folder_list([b'(\\HasNoChildren) "/" 123'])
self.assertEqual(folders, [((b"\\HasNoChildren",), b"/", "123")])
def test_unquoted_numeric_folder_name_parsed_as_long(self):
# big enough numeric values might get parsed as longs
folder_name = str(sys.maxsize + 1)
folders = self.client._proc_folder_list(
[b'(\\HasNoChildren) "/" ' + folder_name.encode("ascii")]
)
self.assertEqual(folders, [((b"\\HasNoChildren",), b"/", folder_name)])
def test_mixed(self):
folders = self.client._proc_folder_list(
[
b'(\\HasNoChildren) "/" Alpha',
b'(\\HasNoChildren) "/" "Foo Bar"',
b'(\\HasNoChildren) "/" C',
]
)
self.assertEqual(
folders,
[
((b"\\HasNoChildren",), b"/", "Alpha"),
((b"\\HasNoChildren",), b"/", "Foo Bar"),
((b"\\HasNoChildren",), b"/", "C"),
],
)
def test_funky_characters(self):
folders = self.client._proc_folder_list(
[
(b'(\\NoInferiors \\UnMarked) "/" {5}', "bang\xff"),
b"",
b'(\\HasNoChildren \\UnMarked) "/" "INBOX"',
]
)
self.assertEqual(
folders,
[
((b"\\NoInferiors", b"\\UnMarked"), b"/", "bang\xff"),
((b"\\HasNoChildren", b"\\UnMarked"), b"/", "INBOX"),
],
)
def test_quoted_specials(self):
folders = self.client._proc_folder_list(
[
br'(\HasNoChildren) "/" "Test \"Folder\""',
br'(\HasNoChildren) "/" "Left\"Right"',
br'(\HasNoChildren) "/" "Left\\Right"',
br'(\HasNoChildren) "/" "\"Left Right\""',
br'(\HasNoChildren) "/" "\"Left\\Right\""',
]
)
self.assertEqual(
folders,
[
((b"\\HasNoChildren",), b"/", 'Test "Folder"'),
((b"\\HasNoChildren",), b"/", 'Left"Right'),
((b"\\HasNoChildren",), b"/", r"Left\Right"),
((b"\\HasNoChildren",), b"/", r'"Left Right"'),
((b"\\HasNoChildren",), b"/", r'"Left\Right"'),
],
)
def test_empty_response(self):
self.assertEqual(self.client._proc_folder_list([None]), [])
    def test_blanks(self):
        """Empty-string and None entries are skipped, real entries kept."""
        folders = self.client._proc_folder_list(
            ["", None, br'(\HasNoChildren) "/" "last"']
        )
        self.assertEqual(folders, [((br"\HasNoChildren",), b"/", "last")])
class TestFindSpecialFolder(IMAPClientTest):
    """Tests for find_special_folder() with and without SPECIAL-USE support."""

    def test_find_special_folder_with_special_use(self):
        """With SPECIAL-USE, the folder is located via its attribute flag."""
        self.client._cached_capabilities = (b"SPECIAL-USE",)
        self.client._imap._simple_command.return_value = ("OK", [b"something"])
        self.client._imap._untagged_response.return_value = (
            "LIST",
            [
                b'(\\HasNoChildren) "/" "INBOX"',
                b'(\\HasNoChildren \\Sent) "/" "Sent"',
            ],
        )
        folder = self.client.find_special_folder(b"\\Sent")
        self.assertEqual(folder, "Sent")

    def test_find_special_folder_with_special_use_single_flag(self):
        """The special-use flag may be the folder's only attribute."""
        self.client._cached_capabilities = (b"SPECIAL-USE",)
        self.client._imap._simple_command.return_value = ("OK", [b"something"])
        self.client._imap._untagged_response.return_value = (
            "LIST",
            [
                b'(\\HasNoChildren) "/" "INBOX"',
                b'(\\Sent) "/" "Sent"',
            ],
        )
        folder = self.client.find_special_folder(b"\\Sent")
        self.assertEqual(folder, "Sent")

    def test_find_special_folder_without_special_use_nor_namespace(self):
        """Without SPECIAL-USE, the lookup falls back to well-known names."""
        self.client._cached_capabilities = (b"FOO",)
        self.client._imap._simple_command.return_value = ("OK", [b"something"])
        self.client._imap._untagged_response.return_value = (
            "LIST",
            [
                b'(\\HasNoChildren) "/" "Sent Items"',
            ],
        )
        folder = self.client.find_special_folder(b"\\Sent")
        self.assertEqual(folder, "Sent Items")
class TestSelectFolder(IMAPClientTest):
    """Tests for select_folder()/unselect_folder() response normalisation."""

    def test_normal(self):
        """select_folder() quotes the name and normalises untagged data.

        Numeric fields become ints, flag lists become tuples, READ-WRITE
        becomes a bool, and unrecognised keys pass through untouched. The
        fixture deliberately includes messy entries (lowercase key,
        truncated PERMANENTFLAGS) the parser must tolerate.
        """
        self.client._command_and_check = Mock()
        self.client._imap.untagged_responses = {
            b"exists": [b"3"],
            b"FLAGS": [br"(\Flagged \Deleted abc [foo]/bar def)"],
            b"HIGHESTMODSEQ": [b"127110"],
            b"OK": [
                br"[PERMANENTFLAGS (\Flagged \Deleted abc [foo]/bar def \*)] Flags permitted.",
                b"[UIDVALIDITY 631062293] UIDs valid.",
                b"[UIDNEXT 1281] Predicted next UID.",
                b"[HIGHESTMODSEQ 127110]",
            ],
            b"PERMANENTFLAGS": [br"(\Flagged \Deleted abc [foo"],
            b"READ-WRITE": [b""],
            b"RECENT": [b"0"],
            b"UIDNEXT": [b"1281"],
            b"UIDVALIDITY": [b"631062293"],
            b"OTHER": [b"blah"],
        }
        result = self.client.select_folder(b"folder_name", sentinel.readonly)
        self.client._command_and_check.assert_called_once_with(
            "select", b'"folder_name"', sentinel.readonly
        )
        self.maxDiff = 99999
        self.assertEqual(
            result,
            {
                b"EXISTS": 3,
                b"RECENT": 0,
                b"UIDNEXT": 1281,
                b"UIDVALIDITY": 631062293,
                b"HIGHESTMODSEQ": 127110,
                b"FLAGS": (br"\Flagged", br"\Deleted", b"abc", b"[foo]/bar", b"def"),
                b"PERMANENTFLAGS": (
                    br"\Flagged",
                    br"\Deleted",
                    b"abc",
                    b"[foo]/bar",
                    b"def",
                    br"\*",
                ),
                b"READ-WRITE": True,
                b"OTHER": [b"blah"],
            },
        )

    def test_unselect(self):
        """unselect_folder() requires UNSELECT and returns the reply text."""
        self.client._cached_capabilities = [b"UNSELECT"]
        self.client._imap._simple_command.return_value = ("OK", ["Unselect completed."])
        # self.client._imap._untagged_response.return_value = (
        #     b'OK', [b'("name" "GImap" "vendor" "Google, Inc.")'])
        result = self.client.unselect_folder()
        self.assertEqual(result, "Unselect completed.")
        self.client._imap._simple_command.assert_called_with("UNSELECT")
class TestAppend(IMAPClientTest):
    """Tests for append() and multiappend() argument marshalling."""

    def test_without_msg_time(self):
        """Without a message time, None is passed through as INTERNALDATE."""
        self.client._imap.append.return_value = ("OK", [b"Good"])
        msg = "hi"
        self.client.append("foobar", msg, ["FLAG", "WAVE"], None)
        self.client._imap.append.assert_called_with(
            b'"foobar"', "(FLAG WAVE)", None, b"hi"
        )

    @patch("imapclient.imapclient.datetime_to_INTERNALDATE")
    def test_with_msg_time(self, datetime_to_INTERNALDATE):
        """A datetime is converted via datetime_to_INTERNALDATE and quoted."""
        datetime_to_INTERNALDATE.return_value = "somedate"
        self.client._imap.append.return_value = ("OK", [b"Good"])
        msg = b"bye"
        self.client.append(
            "foobar",
            msg,
            ["FLAG", "WAVE"],
            datetime(2009, 4, 5, 11, 0, 5, 0, FixedOffset(2 * 60)),
        )
        self.assertTrue(datetime_to_INTERNALDATE.called)
        self.client._imap.append.assert_called_with(
            b'"foobar"', "(FLAG WAVE)", '"somedate"', msg
        )

    def test_multiappend(self):
        """multiappend() sends one APPEND carrying all messages as literals."""
        self.client._cached_capabilities = (b"MULTIAPPEND",)
        self.client._raw_command = Mock()
        self.client.multiappend("foobar", ["msg1", "msg2"])
        self.client._raw_command.assert_called_once_with(
            b"APPEND", [b'"foobar"', b"msg1", b"msg2"], uid=False
        )
class TestAclMethods(IMAPClientTest):
    """Tests for getacl()/setacl(), which require the ACL capability."""

    def setUp(self):
        super(TestAclMethods, self).setUp()
        # Pretend the server advertised ACL so the capability check passes.
        self.client._cached_capabilities = [b"ACL"]

    def test_getacl(self):
        """getacl() splits the response into (who, rights) pairs."""
        self.client._imap.getacl.return_value = (
            "OK",
            [b"INBOX Fred rwipslda Sally rwip"],
        )
        rights = self.client.getacl("INBOX")
        self.assertSequenceEqual(
            rights, [(b"Fred", b"rwipslda"), (b"Sally", b"rwip")]
        )

    def test_setacl(self):
        """setacl() quotes the folder name and returns the server reply."""
        self.client._imap.setacl.return_value = ("OK", [b"SETACL done"])
        reply = self.client.setacl("folder", sentinel.who, sentinel.what)
        self.client._imap.setacl.assert_called_with(
            b'"folder"', sentinel.who, sentinel.what
        )
        self.assertEqual(reply, b"SETACL done")
class TestQuota(IMAPClientTest):
    """Tests for QUOTA support: parsing plus get/set round trips."""

    def setUp(self):
        super(TestQuota, self).setUp()
        # Advertise QUOTA so the capability checks pass.
        self.client._cached_capabilities = [b"QUOTA"]

    def test_parse_quota(self):
        """_parse_quota() yields one Quota per resource triplet per line."""
        self.assertEqual(_parse_quota([]), [])
        self.assertEqual(
            _parse_quota([b'"User quota" (STORAGE 586720 4882812)']),
            [Quota("User quota", "STORAGE", 586720, 4882812)],
        )
        self.assertEqual(
            _parse_quota(
                [
                    b'"User quota" (STORAGE 586720 4882812)',
                    b'"Global quota" (MESSAGES 42 1000)',
                ]
            ),
            [
                Quota("User quota", "STORAGE", 586720, 4882812),
                Quota("Global quota", "MESSAGES", 42, 1000),
            ],
        )
        # A single line may carry several resources.
        self.assertEqual(
            _parse_quota(
                [
                    b'"User quota" (STORAGE 586720 4882812 MESSAGES 42 1000)',
                ]
            ),
            [
                Quota("User quota", "STORAGE", 586720, 4882812),
                Quota("User quota", "MESSAGES", 42, 1000),
            ],
        )

    def test__get_quota(self):
        """_get_quota() quotes the root name and parses the response."""
        self.client._command_and_check = Mock()
        self.client._command_and_check.return_value = [
            b'"User quota" (MESSAGES 42 1000)'
        ]
        quotas = self.client._get_quota("foo")
        self.client._command_and_check.assert_called_once_with("getquota", '"foo"')
        self.assertEqual(quotas, [Quota("User quota", "MESSAGES", 42, 1000)])

    def test_set_quota(self):
        """set_quota() sends only the limits and returns the parsed reply."""
        self.client._raw_command_untagged = Mock()
        self.client._raw_command_untagged.return_value = [
            b'"User quota" (STORAGE 42 1000 MESSAGES 42 1000)'
        ]
        quotas = [
            Quota("User quota", "STORAGE", 42, 1000),
            Quota("User quota", "MESSAGES", 42, 1000),
        ]
        resp = self.client.set_quota(quotas)
        self.client._raw_command_untagged.assert_called_once_with(
            b"SETQUOTA",
            [b'"User quota"', b"(STORAGE 1000 MESSAGES 1000)"],
            uid=False,
            response_name="QUOTA",
        )
        self.assertListEqual(resp, quotas)

    def test_get_quota_root(self):
        """get_quota_root() returns (roots, quotas); get_quota() may be empty."""
        self.client._raw_command_untagged = Mock()
        self.client._raw_command_untagged.return_value = [b'"INBOX" "User quota"']
        self.client._imap.untagged_responses = dict()
        resp = self.client.get_quota_root("INBOX")
        self.client._raw_command_untagged.assert_called_once_with(
            b"GETQUOTAROOT", b"INBOX", uid=False, response_name="QUOTAROOT"
        )
        expected = (MailboxQuotaRoots("INBOX", ["User quota"]), list())
        self.assertTupleEqual(resp, expected)
        # No untagged QUOTA data was queued, so get_quota() finds nothing.
        resp = self.client.get_quota("INBOX")
        self.assertEqual(resp, [])
class TestIdleAndNoop(IMAPClientTest):
    """Tests for idle()/idle_check()/idle_done() and noop().

    The socket is a Mock; the select()- and poll()-based waiting paths are
    exercised separately by patching imapclient's POLL_SUPPORT flag.
    """

    def setUp(self):
        super(TestIdleAndNoop, self).setUp()
        self.client._cached_capabilities = [b"IDLE"]

    def assert_sock_select_calls(self, sock):
        """Assert the socket was made non-blocking then restored (select path)."""
        self.assertListEqual(
            sock.method_calls,
            [
                ("settimeout", (None,), {}),
                ("setblocking", (0,), {}),
                ("setblocking", (1,), {}),
                ("settimeout", (None,), {}),
            ],
        )

    def assert_sock_poll_calls(self, sock):
        """Assert the socket call sequence for the poll() path (adds fileno())."""
        self.assertListEqual(
            sock.method_calls,
            [
                ("settimeout", (None,), {}),
                ("setblocking", (0,), {}),
                ("fileno", (), {}),
                ("setblocking", (1,), {}),
                ("settimeout", (None,), {}),
            ],
        )

    def test_idle(self):
        """idle() issues IDLE and remembers the command tag for idle_done()."""
        self.client._imap._command.return_value = sentinel.tag
        self.client._imap._get_response.return_value = None
        self.client.idle()
        self.client._imap._command.assert_called_with("IDLE")
        self.assertEqual(self.client._idle_tag, sentinel.tag)

    @patch("imapclient.imapclient.POLL_SUPPORT", False)
    @patch("imapclient.imapclient.select.select")
    def test_idle_check_blocking(self, mock_select):
        """Blocking idle_check() drains all queued lines until timeout."""
        mock_sock = Mock()
        self.client._imap.sock = self.client._imap.sslobj = mock_sock
        mock_select.return_value = ([True], [], [])
        counter = itertools.count()

        def fake_get_line():
            # Two untagged lines, then simulate the socket going quiet.
            count = next(counter)
            if count == 0:
                return b"* 1 EXISTS"
            elif count == 1:
                return b"* 0 EXPUNGE"
            else:
                raise socket.timeout

        self.client._imap._get_line = fake_get_line
        responses = self.client.idle_check()
        mock_select.assert_called_once_with([mock_sock], [], [], None)
        self.assert_sock_select_calls(mock_sock)
        self.assertListEqual([(1, b"EXISTS"), (0, b"EXPUNGE")], responses)

    @patch("imapclient.imapclient.POLL_SUPPORT", False)
    @patch("imapclient.imapclient.select.select")
    def test_idle_check_timeout(self, mock_select):
        """A select() timeout with no data returns an empty list."""
        mock_sock = Mock()
        self.client._imap.sock = self.client._imap.sslobj = mock_sock
        mock_select.return_value = ([], [], [])
        responses = self.client.idle_check(timeout=0.5)
        mock_select.assert_called_once_with([mock_sock], [], [], 0.5)
        self.assert_sock_select_calls(mock_sock)
        self.assertListEqual([], responses)

    @patch("imapclient.imapclient.POLL_SUPPORT", False)
    @patch("imapclient.imapclient.select.select")
    def test_idle_check_with_data(self, mock_select):
        """A single queued line is parsed into a (number, type) tuple."""
        mock_sock = Mock()
        self.client._imap.sock = self.client._imap.sslobj = mock_sock
        mock_select.return_value = ([True], [], [])
        counter = itertools.count()

        def fake_get_line():
            count = next(counter)
            if count == 0:
                return b"* 99 EXISTS"
            else:
                raise socket.timeout

        self.client._imap._get_line = fake_get_line
        responses = self.client.idle_check()
        mock_select.assert_called_once_with([mock_sock], [], [], None)
        self.assert_sock_select_calls(mock_sock)
        self.assertListEqual([(99, b"EXISTS")], responses)

    @patch("imapclient.imapclient.POLL_SUPPORT", True)
    @patch("imapclient.imapclient.select.poll")
    def test_idle_check_blocking_poll(self, mock_poll_module):
        """Same as the blocking select() test, but via the poll() code path."""
        mock_sock = Mock(fileno=Mock(return_value=1))
        self.client._imap.sock = self.client._imap.sslobj = mock_sock
        mock_poller = Mock(poll=Mock(return_value=[(1, POLLIN)]))
        mock_poll_module.return_value = mock_poller
        counter = itertools.count()

        def fake_get_line():
            count = next(counter)
            if count == 0:
                return b"* 1 EXISTS"
            elif count == 1:
                return b"* 0 EXPUNGE"
            else:
                raise socket.timeout

        self.client._imap._get_line = fake_get_line
        responses = self.client.idle_check()
        assert mock_poll_module.call_count == 1
        mock_poller.register.assert_called_once_with(1, POLLIN)
        mock_poller.poll.assert_called_once_with(None)
        self.assert_sock_poll_calls(mock_sock)
        self.assertListEqual([(1, b"EXISTS"), (0, b"EXPUNGE")], responses)

    @patch("imapclient.imapclient.POLL_SUPPORT", True)
    @patch("imapclient.imapclient.select.poll")
    def test_idle_check_timeout_poll(self, mock_poll_module):
        """poll() path: the seconds timeout is converted to milliseconds."""
        mock_sock = Mock(fileno=Mock(return_value=1))
        self.client._imap.sock = self.client._imap.sslobj = mock_sock
        mock_poller = Mock(poll=Mock(return_value=[]))
        mock_poll_module.return_value = mock_poller
        responses = self.client.idle_check(timeout=0.5)
        assert mock_poll_module.call_count == 1
        mock_poller.register.assert_called_once_with(1, POLLIN)
        mock_poller.poll.assert_called_once_with(500)
        self.assert_sock_poll_calls(mock_sock)
        self.assertListEqual([], responses)

    @patch("imapclient.imapclient.POLL_SUPPORT", True)
    @patch("imapclient.imapclient.select.poll")
    def test_idle_check_with_data_poll(self, mock_poll_module):
        """poll() path: a single queued line is returned."""
        mock_sock = Mock(fileno=Mock(return_value=1))
        self.client._imap.sock = self.client._imap.sslobj = mock_sock
        mock_poller = Mock(poll=Mock(return_value=[(1, POLLIN)]))
        mock_poll_module.return_value = mock_poller
        counter = itertools.count()

        def fake_get_line():
            count = next(counter)
            if count == 0:
                return b"* 99 EXISTS"
            else:
                raise socket.timeout

        self.client._imap._get_line = fake_get_line
        responses = self.client.idle_check()
        assert mock_poll_module.call_count == 1
        mock_poller.register.assert_called_once_with(1, POLLIN)
        mock_poller.poll.assert_called_once_with(None)
        self.assert_sock_poll_calls(mock_sock)
        self.assertListEqual([(99, b"EXISTS")], responses)

    def test_idle_done(self):
        """idle_done() sends DONE and consumes until the stored IDLE tag."""
        self.client._idle_tag = sentinel.tag
        mockSend = Mock()
        self.client._imap.send = mockSend
        mockConsume = Mock(return_value=sentinel.out)
        self.client._consume_until_tagged_response = mockConsume
        result = self.client.idle_done()
        mockSend.assert_called_with(b"DONE\r\n")
        mockConsume.assert_called_with(sentinel.tag, "IDLE")
        self.assertEqual(result, sentinel.out)

    def test_noop(self):
        """noop() issues NOOP and consumes until its tag."""
        mockCommand = Mock(return_value=sentinel.tag)
        self.client._imap._command = mockCommand
        mockConsume = Mock(return_value=sentinel.out)
        self.client._consume_until_tagged_response = mockConsume
        result = self.client.noop()
        mockCommand.assert_called_with("NOOP")
        mockConsume.assert_called_with(sentinel.tag, "NOOP")
        self.assertEqual(result, sentinel.out)

    def test_consume_until_tagged_response(self):
        """Untagged lines are collected until the tagged reply arrives."""
        client = self.client
        client._imap.tagged_commands = {sentinel.tag: None}
        counter = itertools.count()

        def fake_get_response():
            # First call yields an untagged line; the second completes the
            # tagged command by populating tagged_commands.
            count = next(counter)
            if count == 0:
                return b"* 99 EXISTS"
            client._imap.tagged_commands[sentinel.tag] = ("OK", [b"Idle done"])

        client._imap._get_response = fake_get_response
        text, responses = client._consume_until_tagged_response(sentinel.tag, b"IDLE")
        self.assertEqual(client._imap.tagged_commands, {})
        self.assertEqual(text, b"Idle done")
        self.assertListEqual([(99, b"EXISTS")], responses)
class TestDebugLogging(IMAPClientTest):
    """Tests for imaplib debug output being routed through logging."""

    def test_IMAP_is_patched(self):
        """imaplib's _mesg() output lands in the imapclient.imaplib logger."""
        # Remove all logging handlers so that the order of tests does not
        # prevent basicConfig from being executed
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        log_stream = six.StringIO()
        logging.basicConfig(stream=log_stream, level=logging.DEBUG)
        self.client._imap._mesg("two")
        self.assertIn("DEBUG:imapclient.imaplib:two", log_stream.getvalue())

    def test_redacted_password(self):
        """LOGIN credentials are redacted before reaching the logger.

        The call path into the wrapped logger differs between Python
        versions, so each branch asserts against the method that version's
        LoggerAdapter actually invokes.
        """
        logger_mock = Mock()
        logger_mock.manager.disable = logging.DEBUG
        logger_mock.getEffectiveLevel.return_value = logging.DEBUG
        adapter = IMAPlibLoggerAdapter(logger_mock, dict())
        if six.PY3:
            adapter.info("""> b'ICHH1 LOGIN foo@bar.org "secret"'""")
            if sys.version_info >= (3, 6, 4):
                # LoggerAdapter in Python 3.6.4+ calls logger.log()
                logger_mock.log.assert_called_once_with(
                    logging.INFO, "> b'ICHH1 LOGIN **REDACTED**", extra={}
                )
            else:
                # LoggerAdapter in Python 3.4 to 3.6 calls logger._log()
                logger_mock._log.assert_called_once_with(
                    logging.INFO, "> b'ICHH1 LOGIN **REDACTED**", (), extra={}
                )
        else:
            # LoggerAdapter in Python 2.7 calls logger.info()
            adapter.info('> ICHH1 LOGIN foo@bar.org "secret"')
            logger_mock.info.assert_called_once_with(
                "> ICHH1 LOGIN **REDACTED**", extra={}
            )
class TestTimeNormalisation(IMAPClientTest):
    """Tests that the normalise_times flag is honoured by fetch()."""

    def test_default(self):
        """Time normalisation is enabled by default."""
        self.assertTrue(self.client.normalise_times)

    @patch("imapclient.imapclient.parse_fetch_response")
    def test_pass_through(self, parse_fetch_response):
        """fetch() forwards the current normalise_times value to the parser."""
        self.client._imap._command_complete.return_value = ("OK", sentinel.data)
        self.client._imap._untagged_response.return_value = ("OK", sentinel.fetch_data)
        self.client.use_uid = sentinel.use_uid

        def check(expected):
            # Helper: run a fetch and confirm the parser saw `expected`.
            self.client.fetch(22, ["SOMETHING"])
            parse_fetch_response.assert_called_with(
                sentinel.fetch_data, expected, sentinel.use_uid
            )

        self.client.normalise_times = True
        check(True)
        self.client.normalise_times = False
        check(False)
class TestNamespace(IMAPClientTest):
    """Tests for namespace() response parsing and folder-name decoding."""

    def setUp(self):
        super(TestNamespace, self).setUp()
        self.client._cached_capabilities = [b"NAMESPACE"]

    def set_return(self, value):
        """Stub the raw NAMESPACE reply the server would have produced."""
        self.client._imap.namespace.return_value = ("OK", [value])

    def test_simple(self):
        """A single personal namespace parses into a nested tuple."""
        self.set_return(b'(("FOO." "/")) NIL NIL')
        self.assertEqual(self.client.namespace(), ((("FOO.", "/"),), None, None))

    def test_folder_decoding(self):
        """Modified-UTF-7 prefixes are decoded when folder_encode is on."""
        self.set_return(b'(("&AP8-." "/")) NIL NIL')
        self.assertEqual(self.client.namespace(), ((("\xff.", "/"),), None, None))

    def test_without_folder_decoding(self):
        """With folder_encode off, the raw bytes prefix is preserved."""
        self.set_return(b'(("&AP8-." "/")) NIL NIL')
        self.client.folder_encode = False
        self.assertEqual(self.client.namespace(), (((b"&AP8-.", "/"),), None, None))

    def test_other_only(self):
        """Only the shared namespace section may be populated."""
        self.set_return(b'NIL NIL (("" "."))')
        self.assertEqual(self.client.namespace(), (None, None, (("", "."),)))

    def test_complex(self):
        """Multiple entries per namespace section are all parsed."""
        self.set_return(
            b'(("" "/")) '
            b'(("~" "/")) '
            b'(("#shared/" "/") ("#public/" "/")("#ftp/" "/")("#news." "."))'
        )
        self.assertEqual(
            self.client.namespace(),
            (
                (("", "/"),),
                (("~", "/"),),
                (("#shared/", "/"), ("#public/", "/"), ("#ftp/", "/"), ("#news.", ".")),
            ),
        )
class TestCapabilities(IMAPClientTest):
    """Tests for capability caching, refreshing and querying."""

    def test_preauth(self):
        """Before auth, capabilities come from the initial greeting."""
        self.client._imap.capabilities = ("FOO", "BAR")
        self.client._imap.untagged_responses = {}
        self.assertEqual(self.client.capabilities(), (b"FOO", b"BAR"))

    def test_server_returned_capability_after_auth(self):
        """An untagged CAPABILITY after login replaces and caches the set."""
        self.client._imap.capabilities = (b"FOO",)
        self.client._imap.untagged_responses = {"CAPABILITY": [b"FOO MORE"]}
        self.assertEqual(self.client._cached_capabilities, None)
        self.assertEqual(self.client.capabilities(), (b"FOO", b"MORE"))
        self.assertEqual(self.client._cached_capabilities, (b"FOO", b"MORE"))
        # The untagged response is consumed once processed.
        self.assertEqual(self.client._imap.untagged_responses, {})

    def test_caching(self):
        """A cached capability set short-circuits any server round trip."""
        self.client._imap.capabilities = ("FOO",)
        self.client._imap.untagged_responses = {}
        self.client._cached_capabilities = (b"FOO", b"MORE")
        self.assertEqual(self.client.capabilities(), (b"FOO", b"MORE"))

    def test_post_auth_request(self):
        """When authenticated with no cache, CAPABILITY is requested anew."""
        self.client._imap.capabilities = ("FOO",)
        self.client._imap.untagged_responses = {}
        self.client._imap.state = "SELECTED"
        self.client._imap.capability.return_value = ("OK", [b"FOO BAR"])
        self.assertEqual(self.client.capabilities(), (b"FOO", b"BAR"))
        self.assertEqual(self.client._cached_capabilities, (b"FOO", b"BAR"))

    def test_with_starttls(self):
        """Capabilities are re-fetched after STARTTLS and again after login."""
        # Initial connection
        self.client._imap.capabilities = ("FOO",)
        self.client._imap.untagged_responses = {}
        self.client._imap.state = "NONAUTH"
        self.assertEqual(self.client.capabilities(), (b"FOO",))
        # Now do STARTTLS; capabilities change and should be reported.
        self.client._starttls_done = True
        self.client._imap.capability.return_value = ("OK", [b"FOO BAR"])
        self.assertEqual(self.client.capabilities(), (b"FOO", b"BAR"))
        # Login done; capabilities change again.
        self.client._imap.state = "AUTH"
        self.client._imap.capability.return_value = ("OK", [b"FOO BAR QUX"])
        self.assertEqual(self.client.capabilities(), (b"FOO", b"BAR", b"QUX"))

    def test_has_capability(self):
        """has_capability() is case-insensitive and accepts str or bytes."""
        self.client._cached_capabilities = (b"FOO", b"MORE")
        self.assertTrue(self.client.has_capability(b"FOO"))
        self.assertTrue(self.client.has_capability(b"foo"))
        self.assertFalse(self.client.has_capability(b"BAR"))
        self.assertTrue(self.client.has_capability("FOO"))
        self.assertTrue(self.client.has_capability("foo"))
        self.assertFalse(self.client.has_capability("BAR"))

    def test_decorator(self):
        """require_capability() raises CapabilityError when unsupported."""
        class Foo(object):
            def has_capability(self, capability):
                if capability == "TRUE":
                    return True
                return False

            @require_capability("TRUE")
            def yes(self):
                return True

            @require_capability("FALSE")
            def no(self):
                return False

        foo = Foo()
        self.assertTrue(foo.yes())
        self.assertRaises(CapabilityError, foo.no)
class TestId(IMAPClientTest):
    """Tests for the ID extension (RFC 2971)."""

    def setUp(self):
        super(TestId, self).setUp()
        self.client._cached_capabilities = [b"ID"]

    def test_id(self):
        """id_() serialises the dict and parses the server's ID reply."""
        self.client._imap._simple_command.return_value = ("OK", [b"Success"])
        self.client._imap._untagged_response.return_value = (
            b"OK",
            [b'("name" "GImap" "vendor" "Google, Inc.")'],
        )
        id_response = self.client.id_({"name": "IMAPClient"})
        self.client._imap._simple_command.assert_called_with(
            "ID", '("name" "IMAPClient")'
        )
        self.assertSequenceEqual(
            id_response, ((b"name", b"GImap", b"vendor", b"Google, Inc."),)
        )

    def test_no_support(self):
        """id_() refuses to run without the ID capability."""
        self.client._cached_capabilities = (b"IMAP4rev1",)
        self.assertRaises(CapabilityError, self.client.id_)

    def test_invalid_parameters(self):
        """id_() only accepts a dict (or None) as its parameter."""
        self.assertRaises(TypeError, self.client.id_, "bananarama")
class TestRawCommand(IMAPClientTest):
    """Tests for _raw_command(): literal handling and UID prefixing."""

    def setUp(self):
        super(TestRawCommand, self).setUp()
        self.client._imap._get_response.return_value = None
        self.client._imap._command_complete.return_value = ("OK", ["done"])
        self.client._cached_capabilities = ()

    def check(self, command, args, expected):
        """Run _raw_command() and compare the exact bytes sent on the wire."""
        typ, data = self.client._raw_command(command, args)
        self.assertEqual(typ, "OK")
        self.assertEqual(data, ["done"])
        self.assertEqual(self.client._imap.sent, expected)

    def test_plain(self):
        """ASCII-safe args are sent inline, with a UID prefix by default."""
        self.check(
            b"search",
            [b"ALL"],
            b"tag UID SEARCH ALL\r\n",
        )

    def test_not_uid(self):
        """With use_uid off, no UID prefix is emitted."""
        self.client.use_uid = False
        self.check(
            b"search",
            [b"ALL"],
            b"tag SEARCH ALL\r\n",
        )

    def test_literal_at_end(self):
        """A non-ASCII final arg is sent as an IMAP literal ({n} syntax)."""
        self.check(
            b"search",
            [b"TEXT", b"\xfe\xff"],
            b"tag UID SEARCH TEXT {2}\r\n" b"\xfe\xff\r\n",
        )

    def test_embedded_literal(self):
        """A literal in the middle of the args continues inline afterwards."""
        self.check(
            b"search",
            [b"TEXT", b"\xfe\xff", b"DELETED"],
            b"tag UID SEARCH TEXT {2}\r\n" b"\xfe\xff DELETED\r\n",
        )

    def test_multiple_literals(self):
        """Each non-ASCII arg gets its own literal continuation."""
        self.check(
            b"search",
            [b"TEXT", b"\xfe\xff", b"TEXT", b"\xcc"],
            b"tag UID SEARCH TEXT {2}\r\n" b"\xfe\xff TEXT {1}\r\n" b"\xcc\r\n",
        )

    def test_literal_plus(self):
        """With LITERAL+, {n+} literals are sent without waiting for go-ahead."""
        self.client._cached_capabilities = (b"LITERAL+",)
        typ, data = self.client._raw_command(
            b"APPEND", [b"\xff", _literal(b"hello")], uid=False
        )
        self.assertEqual(typ, "OK")
        self.assertEqual(data, ["done"])
        self.assertEqual(
            self.client._imap.sent,
            b"tag APPEND {1+}\r\n" b"\xff {5+}\r\n" b"hello\r\n",
        )

    def test_complex(self):
        """Inline args and several literals combine in a single command."""
        self.check(
            b"search",
            [b"FLAGGED", b"TEXT", b"\xfe\xff", b"TEXT", b"\xcc", b"TEXT", b"foo"],
            b"tag UID SEARCH FLAGGED TEXT {2}\r\n"
            b"\xfe\xff TEXT {1}\r\n"
            b"\xcc TEXT foo\r\n",
        )

    def test_invalid_input_type(self):
        """str (rather than bytes) command or args are rejected."""
        self.assertRaises(ValueError, self.client._raw_command, "foo", [])
        self.assertRaises(ValueError, self.client._raw_command, "foo", ["foo"])

    def test_failed_continuation_wait(self):
        """A tagged error instead of a continuation raises AbortError."""
        self.client._imap._get_response.return_value = b"blah"
        self.client._imap.tagged_commands["tag"] = ("NO", ["go away"])
        expected_error = r"unexpected response while waiting for continuation response: \(u?'NO', \[u?'go away'\]\)"
        with self.assertRaisesRegex(IMAPClient.AbortError, expected_error):
            self.client._raw_command(b"FOO", [b"\xff"])
class TestExpunge(IMAPClientTest):
    """Tests for expunge(), both the plain and the message-ID variants."""

    def test_expunge(self):
        """Plain expunge() issues EXPUNGE and collects untagged responses."""
        command_mock = Mock(return_value=sentinel.tag)
        consume_mock = Mock(return_value=sentinel.out)
        self.client._imap._command = command_mock
        self.client._consume_until_tagged_response = consume_mock
        outcome = self.client.expunge()
        command_mock.assert_called_with("EXPUNGE")
        consume_mock.assert_called_with(sentinel.tag, "EXPUNGE")
        self.assertEqual(sentinel.out, outcome)

    def test_id_expunge(self):
        """expunge() with explicit message IDs goes through UID EXPUNGE."""
        self.client._imap.uid.return_value = ("OK", [None])
        self.assertEqual([None], self.client.expunge(["4", "5", "6"]))
class TestShutdown(IMAPClientTest):
    """Tests for shutdown()."""

    def test_shutdown(self):
        """shutdown() delegates straight to the underlying imaplib object."""
        self.client.shutdown()
        self.client._imap.shutdown.assert_called_once_with()
class TestContextManager(IMAPClientTest):
    """Tests for using IMAPClient as a context manager."""

    def test_context_manager(self):
        """Leaving the `with` block logs out cleanly."""
        with self.client as client:
            self.assertIsInstance(client, IMAPClient)
        self.client._imap.logout.assert_called_once_with()

    @patch("imapclient.imapclient.logger")
    def test_context_manager_fail_closing(self, mock_logger):
        """Errors during logout fall back to shutdown, then are just logged."""
        self.client._imap.logout.side_effect = RuntimeError("Error logout")
        self.client._imap.shutdown.side_effect = RuntimeError("Error shutdown")
        with self.client as client:
            self.assertIsInstance(client, IMAPClient)
        self.client._imap.logout.assert_called_once_with()
        self.client._imap.shutdown.assert_called_once_with()
        mock_logger.info.assert_called_once_with(
            "Could not close the connection cleanly: %s",
            self.client._imap.shutdown.side_effect,
        )

    def test_exception_inside_context_manager(self):
        """Exceptions raised in the body propagate to the caller."""
        with self.assertRaises(ValueError):
            with self.client as _:
                raise ValueError("Error raised inside the context manager")
class TestProtocolError(IMAPClientTest):
    """Tests for ProtocolError being raised on malformed server data."""

    def test_tagged_response_with_parse_error(self):
        """An unparseable untagged line surfaces as ProtocolError."""
        imap = self.client._imap
        imap.tagged_commands = {sentinel.tag: None}
        # Untagged responses must begin with '*'; this one does not.
        imap._get_response = lambda: b"NOT-A-STAR 99 EXISTS"
        with self.assertRaises(ProtocolError):
            self.client._consume_until_tagged_response(sentinel.tag, b"IDLE")
| 35.231569
| 116
| 0.58165
|
4a065e37123df798dc66dd5b88076eb1311db7e8
| 4,809
|
py
|
Python
|
.idea/VirtualEnvironment/Lib/site-packages/hstest/stage/stage_test.py
|
Vladpetr/NewsPortal
|
cd4127fbc09d9c8f5e65c8ae699856c6d380a320
|
[
"Apache-2.0"
] | null | null | null |
.idea/VirtualEnvironment/Lib/site-packages/hstest/stage/stage_test.py
|
Vladpetr/NewsPortal
|
cd4127fbc09d9c8f5e65c8ae699856c6d380a320
|
[
"Apache-2.0"
] | null | null | null |
.idea/VirtualEnvironment/Lib/site-packages/hstest/stage/stage_test.py
|
Vladpetr/NewsPortal
|
cd4127fbc09d9c8f5e65c8ae699856c6d380a320
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict, List, Optional, Tuple, Type
from hstest.common.reflection_utils import is_tests, setup_cwd
from hstest.common.utils import failed, passed
from hstest.dynamic.input.dynamic_testing import DynamicTestElement, search_dynamic_tests
from hstest.dynamic.output.colored_output import RED_BOLD, RESET
from hstest.dynamic.output.output_handler import OutputHandler
from hstest.dynamic.system_handler import SystemHandler
from hstest.exception.outcomes import OutcomeError, UnexpectedError, WrongAnswer
from hstest.outcomes.outcome import Outcome
from hstest.test_case.check_result import CheckResult
from hstest.test_case.test_case import TestCase
from hstest.testing.runner.async_main_file_runner import AsyncMainFileRunner
from hstest.testing.runner.test_runner import TestRunner
from hstest.testing.test_run import TestRun
class StageTest:
    """Base class for a stage's test suite.

    Subclasses provide test cases either by overriding generate() or via
    dynamically discovered test methods, and grade the tested program's
    output by overriding check().
    """

    # Runner used to execute the tested program.
    runner: TestRunner = AsyncMainFileRunner()
    # Default attachment handed to test cases that don't set their own.
    attach: Any = None
    # Optional class-level override for the tested module's name.
    source: Optional[str] = None
    # Test run currently executing (None outside run_tests()).
    curr_test_run: Optional[TestRun] = None
    # Count of tests executed across all StageTest instances in this process.
    _curr_test_global: int = 0

    def __init__(self, source_name: str = ''):
        # The class-level `source` attribute wins over the constructor arg.
        if self.source:
            self.source_name: str = self.source
        else:
            self.source_name: str = source_name
        # super().__init__(method)
        # self.module =

    # def test_program(self):
    #     result, feedback = self.run_tests()
    #     if result != 0:
    #         self.fail(feedback)

    def after_all_tests(self):
        """Hook invoked once after the whole run; override as needed."""
        pass

    def _init_tests(self) -> List[TestRun]:
        """Collect static and dynamic test cases and wrap them in TestRuns.

        Raises:
            UnexpectedError: if no test cases were found at all.
        """
        test_runs: List[TestRun] = []
        test_cases: List[TestCase] = list(self.generate())
        test_cases += search_dynamic_tests(self)
        if len(test_cases) == 0:
            raise UnexpectedError("No tests found")
        curr_test: int = 0
        test_count = len(test_cases)
        for test_case in test_cases:
            test_case.source_name = self.source_name
            # Fall back to the suite-level check function / attachment.
            if test_case.check_func is None:
                test_case.check_func = self.check
            if test_case.attach is None:
                test_case.attach = self.attach
            curr_test += 1
            test_runs += [
                TestRun(curr_test, test_count, test_case, self.runner)
            ]
        return test_runs

    def __print_test_num(self, num: int):
        """Announce a test's number (plus the global count when they differ)."""
        total_tests = '' if num == self._curr_test_global else f' ({self._curr_test_global})'
        OutputHandler.get_real_out().write(
            RED_BOLD + f'\nStart test {num}{total_tests}' + RESET + '\n'
        )

    def run_tests(self, *, debug=False) -> Tuple[int, str]:
        """Execute all test runs and return an (exit_code, feedback) pair.

        Any exception — including WrongAnswer from a failing check — is
        converted into a failure Outcome; tear-down is still attempted.
        """
        if is_tests(self) or debug:
            setup_cwd(self)
            import hstest.common.utils as hs
            # Suppress the decorative pass/fail banners in debug/self-test mode.
            hs.failed_msg_start = ''
            hs.failed_msg_continue = ''
            hs.success_msg = ''
        curr_test: int = 0
        need_tear_down: bool = False
        try:
            SystemHandler.set_up()
            test_runs = self._init_tests()
            for test_run in test_runs:
                curr_test += 1
                StageTest._curr_test_global += 1
                self.__print_test_num(curr_test)
                if test_run.is_first_test():
                    test_run.set_up()
                    need_tear_down = True
                StageTest.curr_test_run = test_run
                result: CheckResult = test_run.test()
                if not result.is_correct:
                    raise WrongAnswer(result.feedback)
                if test_run.is_last_test():
                    need_tear_down = False
                    test_run.tear_down()
            SystemHandler.tear_down()
            return passed()
        except BaseException as ex:
            if need_tear_down:
                try:
                    StageTest.curr_test_run.tear_down()
                except BaseException as new_ex:
                    # A tear-down OutcomeError carries better diagnostics
                    # than the original exception; prefer it.
                    if isinstance(new_ex, OutcomeError):
                        ex = new_ex
            outcome: Outcome = Outcome.get_outcome(ex, curr_test)
            fail_text = str(outcome)
            try:
                SystemHandler.tear_down()
            except BaseException:
                pass
            return failed(fail_text)
        finally:
            StageTest.curr_test_run = None
            self.after_all_tests()

    # Per-subclass registry of dynamically discovered test elements.
    _dynamic_methods: Dict[Type['StageTest'], List[DynamicTestElement]] = {}

    @classmethod
    def dynamic_methods(cls) -> List[DynamicTestElement]:
        """Return (creating on demand) this subclass's dynamic test list."""
        if cls in StageTest._dynamic_methods:
            return StageTest._dynamic_methods[cls]
        empty = []
        StageTest._dynamic_methods[cls] = empty
        return empty

    def generate(self) -> List[TestCase]:
        """Override to return statically defined test cases."""
        return []

    def check(self, reply: str, attach: Any) -> CheckResult:
        """Override to grade the program's output for a single test case."""
        raise UnexpectedError('Can\'t check result: override "check" method')
| 33.866197
| 93
| 0.613433
|
4a065e42179a0d85c6deb5a59c4068c72778921a
| 10,149
|
py
|
Python
|
bindings/python/cntk/layers/higher_order_layers.py
|
shyamalschandra/CNTK
|
0e7a6cd4cc174eab28eaf2ffc660c6380b9e4e2d
|
[
"MIT"
] | 17,702
|
2016-01-25T14:03:01.000Z
|
2019-05-06T09:23:41.000Z
|
bindings/python/cntk/layers/higher_order_layers.py
|
shyamalschandra/CNTK
|
0e7a6cd4cc174eab28eaf2ffc660c6380b9e4e2d
|
[
"MIT"
] | 3,489
|
2016-01-25T13:32:09.000Z
|
2019-05-03T11:29:15.000Z
|
bindings/python/cntk/layers/higher_order_layers.py
|
shyamalschandra/CNTK
|
0e7a6cd4cc174eab28eaf2ffc660c6380b9e4e2d
|
[
"MIT"
] | 5,180
|
2016-01-25T14:02:12.000Z
|
2019-05-06T04:24:28.000Z
|
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
'''
Higher-order functions, like :func:`Sequential` and :func:`ResNetBlock`. Note that
sequential higher-order functions like :func:`~cntk.layers.sequence.Recurrence` are in :mod:`cntk.layers.sequence`.
'''
from types import FunctionType
from inspect import getargspec
from .blocks import _inject_name, identity
# TODO: should we have a parameter to specify the arity of the input?
# Can it be automatically determined? (yes, unless the first function is a tuple, then we don't know whether to broadcast or not)
def Sequential(layers, name=''):
    '''
    Sequential(layers, name='')
    Layer factory function to create a composite that applies a sequence of layers (or any functions) onto an input.
    ``Sequential ([F, G, H])(x)`` means the same as ``H(G(F(x)))``.
    The list of functions may also include tuples of functions. In that case, each function
    in a tuple is applied to the input, and the result is a tuple containing the results of
    these function applications. If followed by another function (typ. ``plus`` or ``splice``),
    the tuple items form the arguments to that function.
    Intermediate values in the chain can be accessed by name by inserting a ``Label(name=...)`` layer.
    Note: An equivalent way of writing ``Sequential ([F, G, H])(x)`` is ``F >> G >> H``.
    Example:
     >>> from cntk.layers import *
     >>> # sequence classifier. Maps a one-hot word sequence to a scalar probability value.
     >>> # The recurrence is a Fold(), meaning only the final hidden state is produced.
     >>> # The Label() layer allows to access the final hidden layer by name.
     >>> model = Sequential([Embedding(300), Fold(LSTM(500)), Label('hidden'), Dense(1, activation=sigmoid)])
     >>> model.update_signature(Sequence[Tensor[30000]])
     >>> model.hidden.shape
         (500,)
     >>> # simple example that squares an input value
     >>> f = Sequential([C.log, lambda x: 2 * x, C.exp]) # the second function is a Python lambda
     >>> f.update_signature(1)
     >>> f([np.array([2])])     # log, times 2, exp is the same as computing the square
         array([[ 4.]], dtype=float32)
     >>> # using function tuples to implement a bidirectional LSTM
     >>> bi_lstm = Sequential([(Recurrence(LSTM(250)),                      # first tuple entry: forward pass
     ...                        Recurrence(LSTM(250), go_backwards=True)),  # second: backward pass
     ...                       splice])                                     # splice both on top of each other
     >>> # using function tuple to implement a ResNet block
     >>> # The function tuple applies all items to the input, and emits a tuple with the results
     >>> # that then act as the arguments to the next one.
     >>> # Here we say (Convolution(), identity), which generates two arguments to the next function,
     >>> # the first being the convolution, the second being the input passed through.
     >>> # Following that with plus() implements the ResNet formula.
     >>> from cntk.ops import plus, relu
     >>> resnet_layer = Sequential([(Convolution((3,3), 64, activation=None), # first tuple entry
     ...                             identity),                               # second tuple entry is a pass-through
     ...                            plus,                                     # this sums both
     ...                            relu])                                    # activation applied afterwards
     >>> # simple function-tuples example with values
     >>> f = Sequential([(lambda x: x * x, identity), splice]) # computes tuple (x^2, x) and splices both values
     >>> f.update_signature(1)
     >>> f([np.array([2])])
         array([[ 4.,  2.]], dtype=float32)
    Args:
     layers (list of :class:`~cntk.ops.functions.Function`, equivalent Python functions, tuples of functions, or lists thereof): the list of functions to apply in sequence.
       A tuple aplies each of its items to the input and results in a tuple value.
       An item that is a list will be flattened.
    Returns:
        cntk.ops.functions.Function:
        A function that accepts one argument and applies the given ``functions`` one after another.
    '''
    # A non-list argument is already a usable function (or tuple of
    # functions); return it unchanged. This also terminates the recursion
    # used to flatten nested lists below.
    # TODO: Is this confusing w.r.t. tuple which is parallel and list which is sequential?
    if not isinstance(layers, list):
        return layers
    # Expand any nested lists by running each entry through Sequential()
    # itself, then chain the results left-to-right starting from identity,
    # i.e. ((identity >> f1) >> f2) >> ... — identical to a reduce().
    expanded = [Sequential(item) for item in layers]
    chained = identity
    for step in expanded:
        chained = chained >> step
    return _inject_name(chained, name)
def For(what_range, constructor, name=''):
    '''
    For(what_range, constructor, name='')

    Layer factory function to create a composite through a pattern similar to Python's `for` statement.

    This layer factory loops over the given range and passes each value to the constructor function.
    It is equivalent to
    ``Sequential([constructor(i) for i in what_range])``.

    It is acceptable that ``constructor`` takes no argument.

    Example:
     >>> from cntk.layers import *
     >>> from cntk.ops import relu

     >>> # stack of 3 Dense relu layers
     >>> model = For(range(3), lambda: Dense(2000, activation=relu))

     >>> # version of the above that has no activation for the last layer
     >>> model = For(range(3), lambda i: Dense(2000, activation=relu if i < 2 else identity))

     >>> # complex example that uses For() inside Sequential()
     >>> with default_options(activation=relu, pad=True):  # default activation is relu
     ...     model = Sequential([
     ...          For(range(2), lambda : [
     ...              Convolution2D((3,3), 64),
     ...              Convolution2D((3,3), 64),
     ...              MaxPooling((3,3), strides=2)
     ...          ]),
     ...          Label('ndfeat'),              # name this specific value
     ...          For(range(2), lambda i: [     # this passes a nested list to Sequential
     ...              Dense([256,128][i]),      # layer index i used to index into an array of parameters
     ...              Dropout(0.5)
     ...          ]),
     ...          Label('hidden'),
     ...          Dense(10, activation=None)    # activation parameter overrides default (which was set to relu)
     ...      ])
     >>> model.update_signature((3,32,32))  # RGB, 32 x 32 pixels
     >>> model.ndfeat.shape                 # shape at top of convo/pooling pyramid
     (64, 8, 8)
     >>> model.hidden.shape                 # shape before classifier
     (128,)

    Args:
     what_range (range): a Python range to loop over
     constructor (Python function/lambda with 1 or 0 arguments): lambda that constructs a layer

    Returns:
     cntk.ops.functions.Function:
     A function that accepts one argument and applies the layers as constructed by ``constructor`` one after another.
    '''
    # Validate *before* introspecting: calling getargspec() on a non-function
    # would otherwise raise a confusing TypeError before we could report the
    # real problem with the intended ValueError.
    # For Python 3, check if it is a python function/lambda.
    if type(constructor) != FunctionType or not callable(constructor):
        raise ValueError("constructor must be a Python function/lambda")

    # Python 2.7 support requires us to use getargspec() instead of inspect
    takes_arg = len(getargspec(constructor).args) > 0

    # helper to call the layer constructor with or without the loop index
    def call(i):
        if takes_arg:
            return constructor(i)  # takes an arg: pass it
        else:
            return constructor()   # takes no arg: call without, that's fine too

    layers = [call(i) for i in what_range]
    sequential = Sequential(layers)

    return _inject_name(sequential, name)
def SequentialClique(functions, name=''):
    '''
    SequentialClique(functions, name='')

    Layer factory function to create a composite that applies a sequence of functions onto an input,
    with skip connections between all functions: each function receives the sum of the input and all
    prior functions' outputs.

    Example:
     >>> from cntk.layers import *
     >>> from cntk.ops import abs, sqrt, square
     >>> x = C.input_variable(2)
     >>> seq_clique = SequentialClique([abs, sqrt, square])
     >>> seq_clique(x).eval(np.array([2, 8], np.float32)) # 400 = square((8 + abs(8)) + sqrt(8 + abs(8)))
     array([[  36.,  400.]], dtype=float32)

    Args:
     functions (single or list of :class:`~cntk.ops.functions.Function`): functions to be applied.

    Returns:
     cntk.ops.functions.Function:
     A function that accepts one argument and applies the sequence of functions.
    '''
    def clique(arg):
        accumulated = arg
        for fn in functions:
            latest = fn(accumulated)
            # BUGBUG: this should be a splice(), and it should be along depth.
            #         Interface to be finalized.
            accumulated = accumulated + latest
        return latest
    return _inject_name(clique, name)
# TODO: consider potential name clash; users might want to call their functions the same.
def ResNetBlock(f, name=''):
    '''
    ResNetBlock(f, name='')

    Layer factory function to create a composite that adds a skip connection to a function,
    i.e. it computes ``f(x) + x``.
    This is equivalent to ``Sequential((f, identity), plus)``.

    Example:
     >>> # a ResNet layer
     >>> from cntk.layers import *
     >>> from cntk.ops import relu
     >>> resnet_layer = Sequential([ResNetBlock(Convolution((3,3), 64, activation=None)), relu])

    Args:
     f (:class:`~cntk.ops.functions.Function` or equivalent Python function):
      the function to add the skip connection to.

    Returns:
     cntk.ops.functions.Function:
     A function that accepts one argument, applies ``f`` to it, and adds the original argument.
    '''
    def residual(x):
        return f(x) + x
    return _inject_name(residual, name)
| 43.935065
| 173
| 0.613755
|
4a065e7d7b4506661c1a7ed161272b9eb71b9352
| 2,811
|
py
|
Python
|
images/images/GUI_PyOneDark/gui/widgets/py_toggle/py_toggle.py
|
ussnllmn/Elasticsearch-Python-GUI
|
6e1efb73ee566cb75b3372bd69348d98665d53c7
|
[
"MIT"
] | 1
|
2021-11-29T11:01:46.000Z
|
2021-11-29T11:01:46.000Z
|
images/images/GUI_PyOneDark/gui/widgets/py_toggle/py_toggle.py
|
ussnllmn/Elasticsearch-GUI-Pyside6
|
6e1efb73ee566cb75b3372bd69348d98665d53c7
|
[
"MIT"
] | null | null | null |
images/images/GUI_PyOneDark/gui/widgets/py_toggle/py_toggle.py
|
ussnllmn/Elasticsearch-GUI-Pyside6
|
6e1efb73ee566cb75b3372bd69348d98665d53c7
|
[
"MIT"
] | null | null | null |
# ///////////////////////////////////////////////////////////////
#
# BY: WANDERSON M.PIMENTA
# PROJECT MADE WITH: Qt Designer and PySide6
# V: 1.0.0
#
# This project can be used freely for all uses, as long as they maintain the
# respective credits only in the Python scripts, any information in the visual
# interface (GUI) can be modified without any implication.
#
# There are limitations on Qt licenses if you want to use your products
# commercially, I recommend reading them on the official website:
# https://doc.qt.io/qtforpython/licenses.html
#
# ///////////////////////////////////////////////////////////////
# IMPORT QT CORE
# ///////////////////////////////////////////////////////////////
from qt_core import *
class PyToggle(QCheckBox):
    """Animated toggle switch built on :class:`QCheckBox`.

    The checked state drives a horizontal animation of the knob via the
    ``position`` Qt property; all drawing is done in :meth:`paintEvent`, so
    the default checkbox look is fully replaced.
    """

    def __init__(
        self,
        width = 50,
        bg_color = "#777",
        circle_color = "#DDD",
        active_color = "#00BCFF",
        animation_curve = QEasingCurve.OutBounce
    ):
        """
        :param width: total widget width in pixels (height is fixed at 28)
        :param bg_color: track color when unchecked
        :param circle_color: knob color (state-independent)
        :param active_color: track color when checked
        :param animation_curve: easing curve for the knob animation
        """
        QCheckBox.__init__(self)
        self.setFixedSize(width, 28)
        self.setCursor(Qt.PointingHandCursor)

        # COLORS
        self._bg_color = bg_color
        self._circle_color = circle_color
        self._active_color = active_color

        # Knob x-position in pixels; animated by QPropertyAnimation.
        # NOTE(review): the initial value is 3 but the "off" animation target
        # is 4 -- confirm whether that 1px mismatch is intended.
        self._position = 3
        self.animation = QPropertyAnimation(self, b"position")
        self.animation.setEasingCurve(animation_curve)
        self.animation.setDuration(500)
        self.stateChanged.connect(self.setup_animation)

    @Property(float)
    def position(self):
        return self._position

    @position.setter
    def position(self, pos):
        self._position = pos
        self.update()  # repaint with the new knob position

    # START STOP ANIMATION
    def setup_animation(self, value):
        """Restart the knob animation toward the end matching the new state."""
        self.animation.stop()
        if value:
            self.animation.setEndValue(self.width() - 26)
            print("Status : ON")
        else:
            self.animation.setEndValue(4)
            print("Status : OFF")
        self.animation.start()

    def hitButton(self, pos: QPoint):
        # Make the whole widget area clickable, not just the default
        # checkbox indicator rectangle.
        return self.contentsRect().contains(pos)

    def paintEvent(self, e):
        p = QPainter(self)
        p.setRenderHint(QPainter.Antialiasing)
        p.setFont(QFont("Segoe UI", 9))

        # SET PEN
        p.setPen(Qt.NoPen)

        # DRAW RECT
        rect = QRect(0, 0, self.width(), self.height())

        # Only the track color depends on the checked state; the rounded
        # track and the knob are drawn identically in both states (this was
        # previously duplicated across the two branches).
        track_color = self._active_color if self.isChecked() else self._bg_color
        p.setBrush(QColor(track_color))
        p.drawRoundedRect(0, 0, rect.width(), 28, 14, 14)
        p.setBrush(QColor(self._circle_color))
        p.drawEllipse(self._position, 3, 22, 22)

        p.end()
| 31.233333
| 78
| 0.57524
|
4a065eb40bf079f171d6ab2d4582e5b9a64a45e5
| 6,330
|
py
|
Python
|
plum_tools/utils/sshconf.py
|
seekplum/plum_tools
|
1f5404a3afa42a987fbf7799f198f8c3de6b5cc5
|
[
"Apache-2.0"
] | null | null | null |
plum_tools/utils/sshconf.py
|
seekplum/plum_tools
|
1f5404a3afa42a987fbf7799f198f8c3de6b5cc5
|
[
"Apache-2.0"
] | 1
|
2020-03-31T02:26:37.000Z
|
2020-03-31T02:26:37.000Z
|
plum_tools/utils/sshconf.py
|
seekplum/plum_tools
|
1f5404a3afa42a987fbf7799f198f8c3de6b5cc5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
#=============================================================================
# ProjectName: plum_tools
# FileName: sshconf
# Desc: 解析ssh配置函数
# Author: seekplum
# Email: 1131909224m@sina.cn
# HomePage: seekplum.github.io
# Create: 2018-07-07 17:45
#=============================================================================
"""
import re
import sys
from ..conf import PathConfig
from .utils import YmlConfig
from .utils import print_error
class SSHConf(object):
    """Bundles command-line ssh overrides and merges them with defaults.

    Falsy override values (``None``, ``''``, ``0``) mean "use the configured
    default / alias value" for that field.
    """

    def __init__(self, user, port, identityfile):
        """Store the ssh override values.

        :param user: ssh login user name, e.g. ``root``
        :type user: str
        :param port: ssh login port, e.g. ``22``
        :type port: int
        :param identityfile: ssh private key path, e.g. ``~/.ssh/id_rsa``
        :type identityfile: str
        """
        self._user = user
        self._port = port
        self._identityfile = identityfile

    def get_ssh_conf(self, host):
        """Build an ssh conf for *host* from the yml defaults plus overrides.

        :param host: target host ip, e.g. ``10.10.100.1``
        :type host: str
        :return: ssh host info, e.g.::

            {
                'identityfile': '~/.ssh/seekplum',
                'hostname': 'github.com',
                'user': 'seekplum',
                'port': 22
            }
        :rtype: dict
        """
        yml_config = YmlConfig.parse_config_yml(PathConfig.plum_yml_path)
        # Work on a copy: if parse_config_yml caches/reuses the parsed
        # mapping, mutating it in place would leak these per-call overrides
        # into the defaults seen by every later call.
        ssh_conf = dict(yml_config["default_ssh_conf"])
        if self._user:
            ssh_conf["user"] = self._user
        if self._port:
            ssh_conf["port"] = self._port
        if self._identityfile:
            ssh_conf["identityfile"] = self._identityfile
        ssh_conf["hostname"] = host
        return ssh_conf

    def merge_ssh_conf(self, alias_conf):
        """Merge a ``~/.ssh/config`` host entry with overrides and defaults.

        Precedence per field: constructor override > alias entry > yml default.

        :param alias_conf: host entry parsed from ``~/.ssh/config``; must
            contain at least ``hostname``, e.g.::

            {
                'identityfile': '~/.ssh/seekplum',
                'hostname': 'github.com',
                'user': 'seekplum',
                'port': 22
            }
        :type alias_conf: dict
        :return: merged ssh host info with keys ``identityfile``,
            ``hostname``, ``user`` and (int) ``port``
        :rtype: dict
        """
        yml_config = YmlConfig.parse_config_yml(PathConfig.plum_yml_path)
        default_ssh_conf = yml_config["default_ssh_conf"]
        ssh_conf = {
            "identityfile": self._identityfile
            or alias_conf.get("identityfile", default_ssh_conf["identityfile"]),
            "hostname": alias_conf["hostname"],
            "user": self._user or alias_conf.get("user", default_ssh_conf["user"]),
            "port": int(self._port or alias_conf.get("port", default_ssh_conf["port"])),
        }
        return ssh_conf
def get_prefix_host_ip(host_type):
    """Look up the leading three ip octets configured for *host_type*.

    Different ip types map to different ip prefixes in the yml config.
    Exits the program with an error message when the key is missing.

    :param host_type: ip type name, e.g. ``default``
    :type host_type: str
    :return: the first three octets, e.g. ``10.10.100``
    :rtype: str
    """
    type_key = "host_type_%s" % host_type
    try:
        config = YmlConfig.parse_config_yml(PathConfig.plum_yml_path)
        return config[type_key]
    except KeyError:
        print_error("yml文件: %s 中缺少key: %s" % (PathConfig.plum_yml_path, type_key))
        sys.exit(1)
def get_host_ip(host, host_type):
    """Expand an abbreviated ip into a full dotted-quad address.

    An input such as ``1`` or ``100.1`` is completed with the configured
    prefix for *host_type*; a full four-part ip is returned unchanged.

    :param host: abbreviated ip, e.g. ``1``
    :type host: str
    :param host_type: ip type selecting the prefix, e.g. ``default``
    :type host_type: str
    :return: the complete host ip
    :rtype: str
    """
    # Always resolve the prefix (this may exit on a bad host_type,
    # even when the input already is a full ip).
    prefix = get_prefix_host_ip(host_type)
    missing = 3 - host.count(".")  # a standard ip contains 3 dots
    if missing > 0:
        head = ".".join(prefix.split(".")[:missing])
        host = "%s.%s" % (head, host)
    return host
def get_ssh_alias_conf(host):
    """Parse ``~/.ssh/config`` and return the entry for host alias *host*.

    Exits the program with an error message when the alias is not found.

    :param host: host alias (the value after ``Host`` in the config file)
    :rtype ssh_conf dict
    :return ssh_conf ssh host info
    :example ssh_conf
        {
            'identityfile': '~/.ssh/seekplum',
            'hostname': 'github.com',
            'user': 'seekplum',
            'port': 22
        }
    """
    begin = False
    # key/value pairs collected from the matching Host section
    ssh_conf = {}
    with open(PathConfig.ssh_config_path, "r") as f:
        for line in f:
            data = line.split()
            # every config line of interest has exactly two columns
            if len(data) != 2:
                continue
            key = data[0].lower()
            value = data[1]
            # host sections are delimited by "Host" lines
            if key == "host":
                if begin:  # pylint: disable=R1723
                    # a new Host section started -> our section is complete
                    break
                elif value == host:
                    begin = True
                else:
                    continue
            if begin:
                # NOTE: the matching "Host <alias>" line itself is also
                # recorded here (as key 'host') since `begin` is already True.
                ssh_conf[key] = value
    if not begin:
        print_error("未在 %s 中配置主机 %s 的ssh登陆信息" % (PathConfig.ssh_config_path, host))
        sys.exit(1)
    return ssh_conf
def merge_ssh_config(host, host_type, user, port, identityfile):
    """Resolve *host* (ip shorthand or ssh alias) into a full ssh conf dict.

    :param host: ip shorthand (e.g. ``1``) or an alias from ``~/.ssh/config``
    :type host: str
    :param host_type: ip type selecting the prefix, e.g. ``default``
    :type host_type: str
    :param user: ssh login user name, e.g. ``root``
    :type user: str
    :param port: ssh login port, e.g. ``22``
    :type port: int
    :param identityfile: ssh private key path, e.g. ``~/.ssh/id_rsa``
    :type identityfile: str
    :return: ssh host info, e.g.::

        {
            'identityfile': '~/.ssh/seekplum',
            'hostname': 'github.com',
            'user': 'seekplum',
            'port': 22
        }
    :rtype: dict
    """
    conf_obj = SSHConf(user, port, identityfile)
    # A bare (possibly partial) dotted number is treated as an ip shorthand;
    # anything else is looked up as an alias in ~/.ssh/config.
    if re.match(r"^(?:\d+\.){0,3}\d+$", host):
        full_ip = get_host_ip(host, host_type)
        return conf_obj.get_ssh_conf(full_ip)
    alias_conf = get_ssh_alias_conf(host)
    return conf_obj.merge_ssh_conf(alias_conf)
| 25.62753
| 88
| 0.563507
|
4a066064b15cb8b4506492a8bdd07f8e152bc911
| 3,092
|
py
|
Python
|
tests/python/bench_reverse.py
|
Erotemic/misc
|
6f8460a690d05e7e0117becc6cae9902cbe2cedd
|
[
"Apache-2.0"
] | 5
|
2021-04-29T21:07:18.000Z
|
2021-09-29T08:46:08.000Z
|
tests/python/bench_reverse.py
|
Erotemic/misc
|
6f8460a690d05e7e0117becc6cae9902cbe2cedd
|
[
"Apache-2.0"
] | null | null | null |
tests/python/bench_reverse.py
|
Erotemic/misc
|
6f8460a690d05e7e0117becc6cae9902cbe2cedd
|
[
"Apache-2.0"
] | 1
|
2018-04-07T12:26:21.000Z
|
2018-04-07T12:26:21.000Z
|
def benchmark_reversed_range():
    """Benchmark ways of building a reversed range as a list and plot results.

    Compares ``list(reversed(range(...)))`` against an explicit negative-step
    ``range`` over exponentially growing sizes, checks that all candidates
    produce identical output, collects timings into a long-form pandas frame
    and (when plotting is enabled) draws min-time curves with seaborn/kwplot.
    """
    import ubelt as ub
    import pandas as pd
    import timerit

    methods = []

    @methods.append
    def reversed_builtin(x):
        start = 10
        stop = x + start
        ret = list(reversed(range(start, stop)))
        return ret

    @methods.append
    def negative_range(x):
        start = 10
        stop = x + start
        ret = list(range(stop - 1, start - 1, -1))
        return ret

    method_lut = {f.__name__: f for f in methods}

    # Sanity check: every candidate must agree before we bother timing them.
    results = {k: func(10) for k, func in method_lut.items()}
    print('results = {}'.format(ub.repr2(results, nl=1, align=':')))
    if not ub.allsame(results.values()):
        raise AssertionError('Failed consistency check')

    ti = timerit.Timerit(1000, bestof=10, verbose=2)

    basis = {
        'method': list(method_lut.keys()),
        'x': [2 ** i for i in range(14)],
    }
    grid_iter = ub.named_product(basis)

    # For each variation of the experiment, create a row.
    rows = []
    for params in grid_iter:
        key = ub.repr2(params, compact=1, si=1)
        kwargs = params.copy()
        method_key = kwargs.pop('method')
        method = method_lut[method_key]
        # Timerit runs a user-specified number of loops and computes time
        # stats with methodology similar to timeit.
        for timer in ti.reset(key):
            with timer:
                method(**kwargs)
        row = {
            'mean': ti.mean(),
            'min': ti.min(),
            'key': key,
            **params,
        }
        rows.append(row)

    # Long-form pandas data is easy to feed straight into seaborn.
    data = pd.DataFrame(rows)
    print(data)

    plot = True
    if plot:
        # kwplot autosns works well for IPython and script execution;
        # not sure about notebooks.
        import kwplot
        sns = kwplot.autosns()

        ax = kwplot.figure(fnum=1, doclf=True).gca()
        sns.lineplot(data=data, x='x', y='min', hue='method', marker='o', ax=ax)
        ax.set_title('Benchmark Reveral Methods ')
        ax.set_xlabel('A better x-variable description')
        ax.set_ylabel('A better y-variable description')
| 29.447619
| 80
| 0.571475
|
4a06610b20f68064d22bef086f27d4a6836995f1
| 108
|
py
|
Python
|
modules/2.79/bpy/types/OutflowFluidSettings.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/OutflowFluidSettings.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/OutflowFluidSettings.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
class OutflowFluidSettings:
    """Stub of Blender's ``bpy.types.OutflowFluidSettings``.

    API mock for IDE autocompletion only: all attributes default to ``None``;
    real values exist only inside a running Blender instance.
    """
    # presumably toggles whether this outflow object participates in the
    # simulation -- TODO confirm against the Blender 2.79 API docs
    use = None
    use_animated_mesh = None
    volume_initialization = None
| 13.5
| 32
| 0.722222
|
4a0661d4849cd203409e852e7823db4571835c77
| 4,654
|
py
|
Python
|
gobbli/model/mtdnn/src/mt_dnn/gobbli_batcher.py
|
awesome-archive/gobbli
|
71aacbdc1184871b164185dc0c9f615f07b83173
|
[
"Apache-2.0"
] | 276
|
2019-09-13T08:25:51.000Z
|
2022-03-05T13:07:55.000Z
|
gobbli/model/mtdnn/src/mt_dnn/gobbli_batcher.py
|
awesome-archive/gobbli
|
71aacbdc1184871b164185dc0c9f615f07b83173
|
[
"Apache-2.0"
] | 15
|
2019-09-06T14:05:30.000Z
|
2022-01-01T20:15:06.000Z
|
gobbli/model/mtdnn/src/mt_dnn/gobbli_batcher.py
|
awesome-archive/gobbli
|
71aacbdc1184871b164185dc0c9f615f07b83173
|
[
"Apache-2.0"
] | 24
|
2019-09-18T15:11:42.000Z
|
2021-12-23T18:59:55.000Z
|
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import csv
import json
import logging
import os
import pickle as pkl
import random
import string
import sys
from shutil import copyfile
import numpy as np
import torch
from pytorch_pretrained_bert.tokenization import BertTokenizer
# Special token ids used with the 'bert-base-uncased' tokenizer below:
# presumably 100 == [UNK] and 101 == [CLS] -- confirm against the vocab file.
UNK_ID=100
BOS_ID=101
def tokenize_sample(bert_tokenizer, sample, max_seq_len, has_labels, labels):
    """Tokenize one single-sentence sample into BERT-style model inputs.

    Truncates the token sequence to ``max_seq_len - 3``, wraps it in
    ``[CLS]``/``[SEP]`` markers and converts tokens to vocabulary ids.

    :param bert_tokenizer: tokenizer exposing ``tokenize`` and
        ``convert_tokens_to_ids``
    :param sample: dict with key ``'X'`` (raw text) and, when *has_labels*,
        key ``'y'`` (label string)
    :param max_seq_len: maximum total sequence length budget
    :param has_labels: whether to emit an integer label under ``'y'``
    :param labels: mapping from label string to integer id (used only when
        *has_labels* is true)
    :return: dict with ``'input_ids'``, ``'token_type_ids'`` and optionally ``'y'``
    """
    label_value = sample['y'] if has_labels else None
    tokens = bert_tokenizer.tokenize(sample['X'])
    limit = max_seq_len - 3
    if len(tokens) > limit:
        tokens = tokens[:limit]
    wrapped = ['[CLS]'] + tokens + ['[SEP]']
    result = {
        'input_ids': bert_tokenizer.convert_tokens_to_ids(wrapped),
        # single sentence: every position belongs to segment 0
        'token_type_ids': [0] * len(wrapped),
    }
    if has_labels:
        result['y'] = labels[label_value]
    return result
class GobbliBatchGen:
    """Batch generator feeding BERT-style tensors to an MT-DNN model.

    Loads a CSV of samples, tokenizes them, optionally shuffles, groups them
    into batches and yields ``(input_ids, token_type_ids, attention_mask
    [, labels])`` LongTensor tuples per batch.
    """

    def __init__(self, path, batch_size=32, gpu=True, labels=None,
                 has_labels=True, is_train=True, dropout_w=0.005, maxlen=128):
        """
        :param path: path to the input CSV (column ``X``, optionally ``y``)
        :param batch_size: number of samples per yielded batch
        :param gpu: move tensors to CUDA (via pinned memory) before yielding
        :param labels: mapping from label string to integer id
        :param has_labels: whether the CSV has a ``y`` column
        :param is_train: shuffle data and apply word dropout when True
        :param dropout_w: probability of replacing a token id with UNK_ID
        :param maxlen: maximum tokenized sequence length
        """
        self.batch_size = batch_size
        self.has_labels = has_labels
        self.gpu = gpu
        self.labels = labels
        self.is_train = is_train
        # Explicit cache dir required for some reason -- default doesn't exist in the docker
        # container, maybe?
        self.bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', cache_dir='/tmp')
        self.data = self.load(path, maxlen, has_labels)
        if self.is_train:
            # Shuffle samples before batching.  Fix: the shuffled list was
            # previously assigned to an unused local, so training data was
            # never actually shuffled on construction.
            indices = list(range(len(self.data)))
            random.shuffle(indices)
            self.data = [self.data[i] for i in indices]
        self.data = GobbliBatchGen.make_batches(self.data, batch_size)
        self.offset = 0
        self.dropout_w = dropout_w

    @staticmethod
    def make_batches(data, batch_size=32):
        """Split *data* into consecutive chunks of at most *batch_size* items."""
        return [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    def load(self, path, maxlen, has_labels):
        """Read and tokenize all samples from the CSV at *path*."""
        data = []
        with open(path, 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            for row in reader:
                sample = tokenize_sample(self.bert_tokenizer, row, maxlen, has_labels, self.labels)
                if self.is_train:
                    # TODO why is this needed? tokenize_sample already truncates.
                    if len(sample['input_ids']) > maxlen:
                        continue
                data.append(sample)
        print('Loaded {} samples'.format(len(data)))
        return data

    def reset(self):
        """Reshuffle batch order (in training mode) and restart iteration."""
        if self.is_train:
            indices = list(range(len(self.data)))
            random.shuffle(indices)
            self.data = [self.data[i] for i in indices]
        self.offset = 0

    def __random_select__(self, arr):
        # Word dropout: replace each id with UNK_ID with probability dropout_w.
        if self.dropout_w > 0:
            return [UNK_ID if random.uniform(0, 1) < self.dropout_w else e for e in arr]
        else: return arr

    def __len__(self):
        # Number of batches (not samples) once __init__ has run.
        return len(self.data)

    def patch(self, v):
        # Fix: `async` became a reserved keyword in Python 3.7, making
        # `v.cuda(async=True)` a SyntaxError; PyTorch renamed the argument
        # to `non_blocking`.
        v = v.cuda(non_blocking=True)
        return v

    @staticmethod
    def todevice(v, device):
        """Move tensor *v* to *device* and return it."""
        v = v.to(device)
        return v

    def __iter__(self):
        while self.offset < len(self):
            batch = self.data[self.offset]
            batch_size = len(batch)
            # Zero-pad every sample in the batch to the longest sequence.
            tok_len = max(len(x['input_ids']) for x in batch)
            input_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
            token_type_ids = torch.LongTensor(batch_size, tok_len).fill_(0)
            attention_mask = torch.LongTensor(batch_size, tok_len).fill_(0)
            for i, sample in enumerate(batch):
                select_len = min(len(sample['input_ids']), tok_len)
                tok = sample['input_ids']
                if self.is_train:
                    tok = self.__random_select__(tok)
                input_ids[i, :select_len] = torch.LongTensor(tok[:select_len])
                token_type_ids[i, :select_len] = torch.LongTensor(sample['token_type_ids'][:select_len])
                attention_mask[i, :select_len] = torch.LongTensor([1] * select_len)
            batch_data = [input_ids, token_type_ids, attention_mask]
            if self.has_labels:
                labels = [sample['y'] for sample in batch]
                batch_data.append(torch.LongTensor(labels))
            if self.gpu:
                for i, item in enumerate(batch_data):
                    batch_data[i] = self.patch(item.pin_memory())
            self.offset += 1
            # Tuple of 3 tensors, or 4 when labels are present.
            yield tuple(batch_data)
| 33.970803
| 104
| 0.597765
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.