hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70ce53e98f029127d59a58d0a03d540892eb8226
| 159
|
py
|
Python
|
src/tello/data_providers/__init__.py
|
libornovax/tello
|
4273a97a74e20617bfea797968a11961d0695a83
|
[
"MIT"
] | 2
|
2019-11-03T18:11:33.000Z
|
2019-11-16T10:11:56.000Z
|
src/tello/data_providers/__init__.py
|
libornovax/tello
|
4273a97a74e20617bfea797968a11961d0695a83
|
[
"MIT"
] | null | null | null |
src/tello/data_providers/__init__.py
|
libornovax/tello
|
4273a97a74e20617bfea797968a11961d0695a83
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from tello.data_providers.state_data_provider import StateDataProvider
from tello.data_providers.camera_data_provider import CameraDataProvider
| 39.75
| 72
| 0.893082
| 20
| 159
| 6.8
| 0.6
| 0.132353
| 0.191176
| 0.323529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.069182
| 159
| 3
| 73
| 53
| 0.912162
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cb0cf32ea18abaa5da3e93b086e8a7e4eec711cd
| 593
|
py
|
Python
|
urllib/parse/demo10.py
|
silianpan/seal-spider-demo
|
23bf013d08f9edaf23823bc3787f579bccd0ec3a
|
[
"Apache-2.0"
] | null | null | null |
urllib/parse/demo10.py
|
silianpan/seal-spider-demo
|
23bf013d08f9edaf23823bc3787f579bccd0ec3a
|
[
"Apache-2.0"
] | 3
|
2021-09-08T01:11:16.000Z
|
2022-03-02T15:14:03.000Z
|
urllib/parse/demo10.py
|
silianpan/seal-spider-demo
|
23bf013d08f9edaf23823bc3787f579bccd0ec3a
|
[
"Apache-2.0"
] | 1
|
2019-08-04T09:57:29.000Z
|
2019-08-04T09:57:29.000Z
|
from urllib.parse import urljoin
print(urljoin('http://www.baidu.com', 'FAQ.html'))
print(urljoin('http://www.baidu.com', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://cuiqingcai.com/FAQ.html?question=2'))
print(urljoin('http://www.baidu.com?wd=abc', 'https://cuiqingcai.com/index.php'))
print(urljoin('http://www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com', '?category=2#comment'))
print(urljoin('www.baidu.com#comment', '?category=2'))
| 59.3
| 95
| 0.715008
| 90
| 593
| 4.711111
| 0.255556
| 0.226415
| 0.207547
| 0.268868
| 0.801887
| 0.801887
| 0.643868
| 0.643868
| 0.643868
| 0.556604
| 0
| 0.006981
| 0.033727
| 593
| 10
| 96
| 59.3
| 0.732984
| 0
| 0
| 0
| 0
| 0
| 0.632997
| 0.035354
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0.888889
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
cb282d58203417af2067b2527f6d82b5ba18ff80
| 153
|
py
|
Python
|
libplf/__init__.py
|
yyyyyp/libplf
|
fb73e3c55277e3e94c63e596513b188fedadcf35
|
[
"MIT"
] | 2
|
2020-06-10T11:38:50.000Z
|
2020-06-11T10:22:39.000Z
|
libplf/__init__.py
|
yyyyyp/libplf
|
fb73e3c55277e3e94c63e596513b188fedadcf35
|
[
"MIT"
] | null | null | null |
libplf/__init__.py
|
yyyyyp/libplf
|
fb73e3c55277e3e94c63e596513b188fedadcf35
|
[
"MIT"
] | 3
|
2020-06-11T10:25:02.000Z
|
2020-06-12T03:01:34.000Z
|
from __future__ import annotations
from .vector import T as vector
from .point import T as point
from .piece import T as piece
from .plf import T as plf
| 25.5
| 34
| 0.79085
| 28
| 153
| 4.178571
| 0.357143
| 0.239316
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183007
| 153
| 5
| 35
| 30.6
| 0.936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cb5d6c8c4810a7e3fa3b41ae8c89aa2dea530ff2
| 8,068
|
py
|
Python
|
utils/layer_macros.py
|
sreesxlnc/kaggle-right-whale
|
14d5c8a002610dbaadaa0a3c84cca57252201a68
|
[
"MIT"
] | 200
|
2016-01-18T17:13:41.000Z
|
2022-02-18T10:50:50.000Z
|
utils/layer_macros.py
|
ashishlal/kaggle-right-whale
|
f5b8a369866144c7b9ccfeed803b578ae10abe3e
|
[
"MIT"
] | 4
|
2016-04-14T01:56:19.000Z
|
2017-07-18T08:15:32.000Z
|
utils/layer_macros.py
|
ashishlal/kaggle-right-whale
|
f5b8a369866144c7b9ccfeed803b578ae10abe3e
|
[
"MIT"
] | 78
|
2016-01-18T22:31:39.000Z
|
2020-01-04T11:26:41.000Z
|
import lasagne as nn
from lasagne.layers import dnn
from utils.layers import batch_norm2 as batch_norm
def conv2dbn(l, name, **kwargs):
""" Batch normalized DNN Conv2D Layer """
l = nn.layers.dnn.Conv2DDNNLayer(
l, name=name,
**kwargs
)
l = batch_norm(l, name='%sbn' % name)
return l
def conv2dbn2(l, name='', **kwargs):
""" Batch normalized DNN Conv2D Layer """
l = nn.layers.dnn.Conv2DDNNLayer(
l, name=name,
**kwargs
)
l = nn.layers.batch_norm(l, name='%sbn' % name)
return l
# def residual_block(layer, name, num_layers,
# num_filters, filter_size=3, stride=1, pad='same',
# nonlinearity=nn.nonlinearities.rectify):
# conv = layer
# if (num_filters != layer.output_shape[1]) or (stride != 1):
# layer = conv2dbn(
# layer, name='%s_shortcut' % name, num_filters=num_filters,
# filter_size=1, stride=stride, pad=0, nonlinearity=None, b=None
# )
# for i in range(num_layers):
# conv = conv2dbn(
# conv, name='%s_%s' % (name, i), num_filters=num_filters,
# filter_size=filter_size, pad=pad,
# # Remove nonlinearity for the last conv layer
# nonlinearity=nonlinearity if (i == num_layers - 1) else None,
# # Only apply stride for the first conv layer
# stride=stride if i == 0 else 1
# )
# l = nn.layers.merge.ElemwiseSumLayer([conv, layer], name='%s_merge' % name)
# l = nn.layers.NonlinearityLayer(l, nonlinearity=nonlinearity, name='%s_merge_nl' % name)
# return l
def residual_block(layer, name, num_layers, num_filters,
bottleneck=False, bottleneck_factor=4,
filter_size=(3, 3), stride=1, pad='same',
W=nn.init.GlorotUniform(),
nonlinearity=nn.nonlinearities.rectify):
conv = layer
# When changing filter size or feature map size
if (num_filters != layer.output_shape[1]) or (stride != 1):
# Projection shortcut, aka option B
layer = conv2dbn(
layer, name='%s_shortcut' % name, num_filters=num_filters,
filter_size=1, stride=stride, pad=0, nonlinearity=None, b=None
)
if bottleneck and num_layers < 3:
raise ValueError('At least 3 layers is required for bottleneck configuration')
for i in range(num_layers):
if bottleneck:
# Force then first and last layer to use 1x1 convolution
if i == 0 or (i == (num_layers - 1)):
actual_filter_size = (1, 1)
else:
actual_filter_size = filter_size
# Only increase the filter size to the target size for
# the last layer
if i == (num_layers - 1):
actual_num_filters = num_filters
else:
actual_num_filters = num_filters / bottleneck_factor
else:
actual_num_filters = num_filters
actual_filter_size = filter_size
conv = conv2dbn(
conv, name='%s_%s' % (name, i), num_filters=actual_num_filters,
filter_size=actual_filter_size, pad=pad, W=W,
# Remove nonlinearity for the last conv layer
nonlinearity=nonlinearity if (i < num_layers - 1) else None,
# Only apply stride for the first conv layer
stride=stride if i == 0 else 1
)
l = nn.layers.merge.ElemwiseSumLayer([conv, layer], name='%s_elemsum' % name)
l = nn.layers.NonlinearityLayer(l, nonlinearity=nonlinearity, name='%s_elemsum_nl' % name)
return l
# TODO WTF is localbn? is this different from residual_block3?
def residual_block3_localbn(layer, name, num_layers, num_filters,
bottleneck=False, bottleneck_factor=4,
filter_size=(3, 3), stride=1, pad='same',
W=nn.init.GlorotUniform(),
nonlinearity=nn.nonlinearities.rectify):
conv = layer
# Insert shortcut when changing filter size or feature map size
if (num_filters != layer.output_shape[1]) or (stride != 1):
# Projection shortcut, aka option B
layer = nn.layers.dnn.Conv2DDNNLayer(
layer, name='%s_shortcut' % name, num_filters=num_filters,
filter_size=1, stride=stride, pad=0, nonlinearity=None, b=None
)
if bottleneck and num_layers < 3:
raise ValueError('At least 3 layers is required for bottleneck configuration')
for i in range(num_layers):
if bottleneck:
# Force then first and last layer to use 1x1 convolution
if i == 0 or (i == (num_layers - 1)):
actual_filter_size = (1, 1)
else:
actual_filter_size = filter_size
# Only increase the filter size to the target size for
# the last layer
if i == (num_layers - 1):
actual_num_filters = num_filters
else:
actual_num_filters = num_filters / bottleneck_factor
else:
actual_num_filters = num_filters
actual_filter_size = filter_size
# TODO the last layer should probably not be bn-ed..
conv = conv2dbn(
conv, name='%s_%s' % (name, i), num_filters=actual_num_filters,
filter_size=actual_filter_size, pad=pad, W=W,
# Remove nonlinearity for the last conv layer
nonlinearity=nonlinearity if (i < num_layers - 1) else None,
# Only apply stride for the first conv layer
stride=stride if i == 0 else 1
)
l = nn.layers.merge.ElemwiseSumLayer([conv, layer], name='%s_elemsum' % name)
l = batch_norm(l)
l = nn.layers.NonlinearityLayer(l, nonlinearity=nonlinearity, name='%s_elemsum_nl' % name)
return l
def residual_block3(layer, name, num_layers, num_filters,
bottleneck=False, bottleneck_factor=4,
filter_size=(3, 3), stride=1, pad='same',
W=nn.init.GlorotUniform(),
nonlinearity=nn.nonlinearities.rectify):
conv = layer
# Insert shortcut when changing filter size or feature map size
if (num_filters != layer.output_shape[1]) or (stride != 1):
# Projection shortcut, aka option B
layer = nn.layers.dnn.Conv2DDNNLayer(
layer, name='%s_shortcut' % name, num_filters=num_filters,
filter_size=1, stride=stride, pad=0, nonlinearity=None, b=None
)
if bottleneck and num_layers < 3:
raise ValueError('At least 3 layers is required for bottleneck configuration')
for i in range(num_layers):
if bottleneck:
# Force then first and last layer to use 1x1 convolution
if i == 0 or (i == (num_layers - 1)):
actual_filter_size = (1, 1)
else:
actual_filter_size = filter_size
# Only increase the filter size to the target size for
# the last layer
if i == (num_layers - 1):
actual_num_filters = num_filters
else:
actual_num_filters = num_filters / bottleneck_factor
else:
actual_num_filters = num_filters
actual_filter_size = filter_size
# TODO the last layer should probably not be bn-ed..
conv = conv2dbn2(
conv, name='%s_%s' % (name, i), num_filters=actual_num_filters,
filter_size=actual_filter_size, pad=pad, W=W,
# Remove nonlinearity for the last conv layer
nonlinearity=nonlinearity if (i < num_layers - 1) else None,
# Only apply stride for the first conv layer
stride=stride if i == 0 else 1
)
l = nn.layers.merge.ElemwiseSumLayer([conv, layer], name='%s_elemsum' % name)
l = nn.layers.batch_norm(l)
l = nn.layers.NonlinearityLayer(l, nonlinearity=nonlinearity, name='%s_elemsum_nl' % name)
return l
| 39.54902
| 94
| 0.595935
| 1,014
| 8,068
| 4.583826
| 0.105523
| 0.090361
| 0.039157
| 0.060241
| 0.950732
| 0.947504
| 0.924914
| 0.924914
| 0.924914
| 0.914587
| 0
| 0.016022
| 0.311477
| 8,068
| 203
| 95
| 39.743842
| 0.820702
| 0.26884
| 0
| 0.822581
| 0
| 0
| 0.053253
| 0
| 0
| 0
| 0
| 0.004926
| 0
| 1
| 0.040323
| false
| 0
| 0.024194
| 0
| 0.104839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbe66e15e2f4986cbebcc6242133ac5e5da4dc6f
| 126,621
|
py
|
Python
|
Thrift/gen-py/SpotifakeServices/AlbumService.py
|
BrunoLujan/Spotifake-DESER
|
a811444af0a1326659dd27949c6a1c66c7cd66a1
|
[
"Apache-2.0"
] | null | null | null |
Thrift/gen-py/SpotifakeServices/AlbumService.py
|
BrunoLujan/Spotifake-DESER
|
a811444af0a1326659dd27949c6a1c66c7cd66a1
|
[
"Apache-2.0"
] | null | null | null |
Thrift/gen-py/SpotifakeServices/AlbumService.py
|
BrunoLujan/Spotifake-DESER
|
a811444af0a1326659dd27949c6a1c66c7cd66a1
|
[
"Apache-2.0"
] | null | null | null |
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def GetAlbumByTitle(self, title):
"""
Get Album by Title
@param title
The Album Title to be obtained
@return Album
Album object
Parameters:
- title
"""
pass
def GetAlbumsByContentCreatorId(self, idContentCreator):
"""
Get list of Track from Content creator by idContentCreator.
@param idContentCreator
The ContentCreator Id which a track will be added
@return list<Album>
Album found by idContenCreator
Parameters:
- idContentCreator
"""
pass
def GetSinglesByContentCreatorId(self, idContentCreator):
"""
Get list of Track from Content creator by idContentCreator.
@param idContentCreator
The ContentCreator Id which a track will be added
@return list<String>
Album found by idContenCreator
Parameters:
- idContentCreator
"""
pass
def GetAlbumByLibraryId(self, idLibrary):
"""
Get list of Album from Library by idLibrary.
@param idLibrary
The Library Id
@return list<Album>
Album found by idLibrary
Parameters:
- idLibrary
"""
pass
def AddAlbum(self, newAlbum, idContenCreator):
"""
Register an Album.
@param newAlbum
@return idNewAlbum
Album object added
Parameters:
- newAlbum
- idContenCreator
"""
pass
def AddFeaturingAlbum(self, idNewAlbum, idContenCreator):
"""
Register a featuring Album.
@param newAlbum
@return idNewAlbum
Featuring added
Parameters:
- idNewAlbum
- idContenCreator
"""
pass
def DeleteAlbum(self, idAlbum):
"""
Delete a Album
@param idAlbum
The Album Id of the Album to be deleted.
@return Id
The Album Id of the Album deleted.
Parameters:
- idAlbum
"""
pass
def UpdateAlbumTitle(self, idAlbum, newAlbumTitle):
"""
Update previously registered Album title.
@param idAlbum
The Album Id of the Album which require an update title.
@return Album
Modified Album obejct.
Parameters:
- idAlbum
- newAlbumTitle
"""
pass
def UpdateAlbumCover(self, idAlbum, newCoverStoragePath):
"""
Update previously registered Album cover.
@param idAlbum
The Album Id of the Album which require an update cover.
@return Album
Modified Album obejct.
Parameters:
- idAlbum
- newCoverStoragePath
"""
pass
def AddAlbumToLibrary(self, idLibrary, idAlbum):
"""
Add an Album to Library.
@param idLibrary
The Library Id to which an album will be added
@param newAlbum
@return Album
Album object added
Parameters:
- idLibrary
- idAlbum
"""
pass
def DeleteLibraryAlbum(self, idLibrary, idAlbum):
"""
Delete an Album from a Library
@param idLibrary
The Library Id which an album will be deleted.
@param idAlbum
The Album Id which will be deleted
@return Id
The Album Id of the Album deleted.
Parameters:
- idLibrary
- idAlbum
"""
pass
def GetAlbumByQuery(self, query):
"""
Get Album by Query
@param query
The query to be obtained
@return Album
list<Album>
Parameters:
- query
"""
pass
def AddImageToMedia(self, fileName, image):
"""
Add image file binary
@param binary image
The binary number that will be keep.
@return bool
true or false.
Parameters:
- fileName
- image
"""
pass
def GetImageToMedia(self, fileName):
"""
Get image file binary
@param binary image
The binary number that will be keep.
@return binary
binary image.
Parameters:
- fileName
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def GetAlbumByTitle(self, title):
"""
Get Album by Title
@param title
The Album Title to be obtained
@return Album
Album object
Parameters:
- title
"""
self.send_GetAlbumByTitle(title)
return self.recv_GetAlbumByTitle()
def send_GetAlbumByTitle(self, title):
self._oprot.writeMessageBegin('GetAlbumByTitle', TMessageType.CALL, self._seqid)
args = GetAlbumByTitle_args()
args.title = title
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_GetAlbumByTitle(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = GetAlbumByTitle_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorNotFoundE is not None:
raise result.sErrorNotFoundE
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
if result.sErrorInvalidRequestE is not None:
raise result.sErrorInvalidRequestE
raise TApplicationException(TApplicationException.MISSING_RESULT, "GetAlbumByTitle failed: unknown result")
def GetAlbumsByContentCreatorId(self, idContentCreator):
"""
Get list of Track from Content creator by idContentCreator.
@param idContentCreator
The ContentCreator Id which a track will be added
@return list<Album>
Album found by idContenCreator
Parameters:
- idContentCreator
"""
self.send_GetAlbumsByContentCreatorId(idContentCreator)
return self.recv_GetAlbumsByContentCreatorId()
def send_GetAlbumsByContentCreatorId(self, idContentCreator):
self._oprot.writeMessageBegin('GetAlbumsByContentCreatorId', TMessageType.CALL, self._seqid)
args = GetAlbumsByContentCreatorId_args()
args.idContentCreator = idContentCreator
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_GetAlbumsByContentCreatorId(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = GetAlbumsByContentCreatorId_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorNotFoundE is not None:
raise result.sErrorNotFoundE
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
raise TApplicationException(TApplicationException.MISSING_RESULT, "GetAlbumsByContentCreatorId failed: unknown result")
def GetSinglesByContentCreatorId(self, idContentCreator):
"""
Get list of Track from Content creator by idContentCreator.
@param idContentCreator
The ContentCreator Id which a track will be added
@return list<String>
Album found by idContenCreator
Parameters:
- idContentCreator
"""
self.send_GetSinglesByContentCreatorId(idContentCreator)
return self.recv_GetSinglesByContentCreatorId()
def send_GetSinglesByContentCreatorId(self, idContentCreator):
self._oprot.writeMessageBegin('GetSinglesByContentCreatorId', TMessageType.CALL, self._seqid)
args = GetSinglesByContentCreatorId_args()
args.idContentCreator = idContentCreator
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_GetSinglesByContentCreatorId(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = GetSinglesByContentCreatorId_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorNotFoundE is not None:
raise result.sErrorNotFoundE
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
raise TApplicationException(TApplicationException.MISSING_RESULT, "GetSinglesByContentCreatorId failed: unknown result")
def GetAlbumByLibraryId(self, idLibrary):
"""
Get list of Album from Library by idLibrary.
@param idLibrary
The Library Id
@return list<Album>
Album found by idLibrary
Parameters:
- idLibrary
"""
self.send_GetAlbumByLibraryId(idLibrary)
return self.recv_GetAlbumByLibraryId()
def send_GetAlbumByLibraryId(self, idLibrary):
self._oprot.writeMessageBegin('GetAlbumByLibraryId', TMessageType.CALL, self._seqid)
args = GetAlbumByLibraryId_args()
args.idLibrary = idLibrary
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_GetAlbumByLibraryId(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = GetAlbumByLibraryId_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorNotFoundE is not None:
raise result.sErrorNotFoundE
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
raise TApplicationException(TApplicationException.MISSING_RESULT, "GetAlbumByLibraryId failed: unknown result")
def AddAlbum(self, newAlbum, idContenCreator):
"""
Register an Album.
@param newAlbum
@return idNewAlbum
Album object added
Parameters:
- newAlbum
- idContenCreator
"""
self.send_AddAlbum(newAlbum, idContenCreator)
return self.recv_AddAlbum()
def send_AddAlbum(self, newAlbum, idContenCreator):
self._oprot.writeMessageBegin('AddAlbum', TMessageType.CALL, self._seqid)
args = AddAlbum_args()
args.newAlbum = newAlbum
args.idContenCreator = idContenCreator
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_AddAlbum(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = AddAlbum_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
raise TApplicationException(TApplicationException.MISSING_RESULT, "AddAlbum failed: unknown result")
def AddFeaturingAlbum(self, idNewAlbum, idContenCreator):
"""
Register a featuring Album.
@param newAlbum
@return idNewAlbum
Featuring added
Parameters:
- idNewAlbum
- idContenCreator
"""
self.send_AddFeaturingAlbum(idNewAlbum, idContenCreator)
return self.recv_AddFeaturingAlbum()
def send_AddFeaturingAlbum(self, idNewAlbum, idContenCreator):
self._oprot.writeMessageBegin('AddFeaturingAlbum', TMessageType.CALL, self._seqid)
args = AddFeaturingAlbum_args()
args.idNewAlbum = idNewAlbum
args.idContenCreator = idContenCreator
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_AddFeaturingAlbum(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = AddFeaturingAlbum_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
raise TApplicationException(TApplicationException.MISSING_RESULT, "AddFeaturingAlbum failed: unknown result")
def DeleteAlbum(self, idAlbum):
"""
Delete a Album
@param idAlbum
The Album Id of the Album to be deleted.
@return Id
The Album Id of the Album deleted.
Parameters:
- idAlbum
"""
self.send_DeleteAlbum(idAlbum)
return self.recv_DeleteAlbum()
def send_DeleteAlbum(self, idAlbum):
self._oprot.writeMessageBegin('DeleteAlbum', TMessageType.CALL, self._seqid)
args = DeleteAlbum_args()
args.idAlbum = idAlbum
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_DeleteAlbum(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = DeleteAlbum_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorNotFoundE is not None:
raise result.sErrorNotFoundE
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
if result.sErrorInvalidRequestE is not None:
raise result.sErrorInvalidRequestE
raise TApplicationException(TApplicationException.MISSING_RESULT, "DeleteAlbum failed: unknown result")
def UpdateAlbumTitle(self, idAlbum, newAlbumTitle):
"""
Update previously registered Album title.
@param idAlbum
The Album Id of the Album which require an update title.
@return Album
Modified Album obejct.
Parameters:
- idAlbum
- newAlbumTitle
"""
self.send_UpdateAlbumTitle(idAlbum, newAlbumTitle)
return self.recv_UpdateAlbumTitle()
def send_UpdateAlbumTitle(self, idAlbum, newAlbumTitle):
self._oprot.writeMessageBegin('UpdateAlbumTitle', TMessageType.CALL, self._seqid)
args = UpdateAlbumTitle_args()
args.idAlbum = idAlbum
args.newAlbumTitle = newAlbumTitle
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_UpdateAlbumTitle(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = UpdateAlbumTitle_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorNotFoundE is not None:
raise result.sErrorNotFoundE
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
if result.sErrorInvalidRequestE is not None:
raise result.sErrorInvalidRequestE
raise TApplicationException(TApplicationException.MISSING_RESULT, "UpdateAlbumTitle failed: unknown result")
def UpdateAlbumCover(self, idAlbum, newCoverStoragePath):
"""
Update previously registered Album cover.
@param idAlbum
The Album Id of the Album which require an update cover.
@return Album
Modified Album obejct.
Parameters:
- idAlbum
- newCoverStoragePath
"""
self.send_UpdateAlbumCover(idAlbum, newCoverStoragePath)
return self.recv_UpdateAlbumCover()
def send_UpdateAlbumCover(self, idAlbum, newCoverStoragePath):
self._oprot.writeMessageBegin('UpdateAlbumCover', TMessageType.CALL, self._seqid)
args = UpdateAlbumCover_args()
args.idAlbum = idAlbum
args.newCoverStoragePath = newCoverStoragePath
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_UpdateAlbumCover(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = UpdateAlbumCover_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.sErrorNotFoundE is not None:
raise result.sErrorNotFoundE
if result.sErrorSystemE is not None:
raise result.sErrorSystemE
if result.sErrorInvalidRequestE is not None:
raise result.sErrorInvalidRequestE
raise TApplicationException(TApplicationException.MISSING_RESULT, "UpdateAlbumCover failed: unknown result")
def AddAlbumToLibrary(self, idLibrary, idAlbum):
"""
Add an Album to Library.
@param idLibrary
The Library Id to which an album will be added
@param newAlbum
@return Album
Album object added
Parameters:
- idLibrary
- idAlbum
"""
self.send_AddAlbumToLibrary(idLibrary, idAlbum)
return self.recv_AddAlbumToLibrary()
def send_AddAlbumToLibrary(self, idLibrary, idAlbum):
    # Write a CALL message carrying the args struct and flush the transport.
    self._oprot.writeMessageBegin('AddAlbumToLibrary', TMessageType.CALL, self._seqid)
    args = AddAlbumToLibrary_args()
    args.idLibrary = idLibrary
    args.idAlbum = idAlbum
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_AddAlbumToLibrary(self):
    # Read the reply: re-raise server-side TApplicationExceptions, then raise
    # the declared service exception if set, otherwise return the success payload.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = AddAlbumToLibrary_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.sErrorSystemE is not None:
        raise result.sErrorSystemE
    raise TApplicationException(TApplicationException.MISSING_RESULT, "AddAlbumToLibrary failed: unknown result")
def DeleteLibraryAlbum(self, idLibrary, idAlbum):
    """
    Delete an Album from a Library

    @param idLibrary
     The Library Id which an album will be deleted.
    @param idAlbum
     The Album Id which will be deleted
    @return Id
     The Album Id of the Album deleted.

    Parameters:
     - idLibrary
     - idAlbum
    """
    # Synchronous RPC: serialize the request, then block until the reply arrives.
    self.send_DeleteLibraryAlbum(idLibrary, idAlbum)
    return self.recv_DeleteLibraryAlbum()
def send_DeleteLibraryAlbum(self, idLibrary, idAlbum):
    # Write a CALL message carrying the args struct and flush the transport.
    self._oprot.writeMessageBegin('DeleteLibraryAlbum', TMessageType.CALL, self._seqid)
    args = DeleteLibraryAlbum_args()
    args.idLibrary = idLibrary
    args.idAlbum = idAlbum
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_DeleteLibraryAlbum(self):
    # Read the reply: re-raise server-side TApplicationExceptions, then raise
    # any declared service exception, otherwise return the success payload.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = DeleteLibraryAlbum_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.sErrorNotFoundE is not None:
        raise result.sErrorNotFoundE
    if result.sErrorSystemE is not None:
        raise result.sErrorSystemE
    raise TApplicationException(TApplicationException.MISSING_RESULT, "DeleteLibraryAlbum failed: unknown result")
def GetAlbumByQuery(self, query):
    """
    Get Album by Query

    @param query
     The query to be obtained
    @return Album
     list<Album>

    Parameters:
     - query
    """
    # Synchronous RPC: serialize the request, then block until the reply arrives.
    self.send_GetAlbumByQuery(query)
    return self.recv_GetAlbumByQuery()
def send_GetAlbumByQuery(self, query):
    # Write a CALL message carrying the args struct and flush the transport.
    self._oprot.writeMessageBegin('GetAlbumByQuery', TMessageType.CALL, self._seqid)
    args = GetAlbumByQuery_args()
    args.query = query
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_GetAlbumByQuery(self):
    # Read the reply: re-raise server-side TApplicationExceptions, then raise
    # any declared service exception, otherwise return the success payload.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = GetAlbumByQuery_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.sErrorNotFoundE is not None:
        raise result.sErrorNotFoundE
    if result.sErrorSystemE is not None:
        raise result.sErrorSystemE
    raise TApplicationException(TApplicationException.MISSING_RESULT, "GetAlbumByQuery failed: unknown result")
def AddImageToMedia(self, fileName, image):
    """
    Add image file binary.

    @param fileName
     Name under which the image is stored.
    @param image
     Binary image payload to keep.
    @return bool
     true or false.

    Parameters:
     - fileName
     - image
    """
    # Synchronous RPC: serialize the request, then block until the reply arrives.
    self.send_AddImageToMedia(fileName, image)
    return self.recv_AddImageToMedia()
def send_AddImageToMedia(self, fileName, image):
    # Write a CALL message carrying the args struct and flush the transport.
    self._oprot.writeMessageBegin('AddImageToMedia', TMessageType.CALL, self._seqid)
    args = AddImageToMedia_args()
    args.fileName = fileName
    args.image = image
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_AddImageToMedia(self):
    # Read the reply: re-raise server-side TApplicationExceptions, then raise
    # the declared service exception if set, otherwise return the success payload.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = AddImageToMedia_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.sErrorSystemE is not None:
        raise result.sErrorSystemE
    raise TApplicationException(TApplicationException.MISSING_RESULT, "AddImageToMedia failed: unknown result")
def GetImageToMedia(self, fileName):
    """
    Get image file binary.

    @param fileName
     Name of the stored image to fetch.
    @return binary
     binary image.

    Parameters:
     - fileName
    """
    # Synchronous RPC: serialize the request, then block until the reply arrives.
    self.send_GetImageToMedia(fileName)
    return self.recv_GetImageToMedia()
def send_GetImageToMedia(self, fileName):
    # Write a CALL message carrying the args struct and flush the transport.
    self._oprot.writeMessageBegin('GetImageToMedia', TMessageType.CALL, self._seqid)
    args = GetImageToMedia_args()
    args.fileName = fileName
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_GetImageToMedia(self):
    # Read the reply: re-raise server-side TApplicationExceptions, then raise
    # the declared service exception if set, otherwise return the success payload.
    iprot = self._iprot
    (fname, mtype, rseqid) = iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(iprot)
        iprot.readMessageEnd()
        raise x
    result = GetImageToMedia_result()
    result.read(iprot)
    iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    if result.sErrorSystemE is not None:
        raise result.sErrorSystemE
    raise TApplicationException(TApplicationException.MISSING_RESULT, "GetImageToMedia failed: unknown result")
class Processor(Iface, TProcessor):
    """
    Server-side dispatcher for this service.

    Maps incoming RPC method names to process_* handlers. Each handler
    decodes the args struct from the input protocol, invokes the user
    handler, and writes back either a REPLY (result or declared service
    exception) or an EXCEPTION (TApplicationException) message.
    """

    def __init__(self, handler):
        # handler: user object implementing the Iface service methods.
        self._handler = handler
        # RPC name -> unbound process_* method used by process() for dispatch.
        self._processMap = {}
        self._processMap["GetAlbumByTitle"] = Processor.process_GetAlbumByTitle
        self._processMap["GetAlbumsByContentCreatorId"] = Processor.process_GetAlbumsByContentCreatorId
        self._processMap["GetSinglesByContentCreatorId"] = Processor.process_GetSinglesByContentCreatorId
        self._processMap["GetAlbumByLibraryId"] = Processor.process_GetAlbumByLibraryId
        self._processMap["AddAlbum"] = Processor.process_AddAlbum
        self._processMap["AddFeaturingAlbum"] = Processor.process_AddFeaturingAlbum
        self._processMap["DeleteAlbum"] = Processor.process_DeleteAlbum
        self._processMap["UpdateAlbumTitle"] = Processor.process_UpdateAlbumTitle
        self._processMap["UpdateAlbumCover"] = Processor.process_UpdateAlbumCover
        self._processMap["AddAlbumToLibrary"] = Processor.process_AddAlbumToLibrary
        self._processMap["DeleteLibraryAlbum"] = Processor.process_DeleteLibraryAlbum
        self._processMap["GetAlbumByQuery"] = Processor.process_GetAlbumByQuery
        self._processMap["AddImageToMedia"] = Processor.process_AddImageToMedia
        self._processMap["GetImageToMedia"] = Processor.process_GetImageToMedia
        self._on_message_begin = None

    def on_message_begin(self, func):
        # Register an optional observer called with (name, type, seqid)
        # for every incoming message.
        self._on_message_begin = func

    def process(self, iprot, oprot):
        # Dispatch one incoming message. Unknown method names are answered
        # with an UNKNOWN_METHOD exception (and return None); known methods
        # are delegated to their process_* handler and True is returned.
        (name, type, seqid) = iprot.readMessageBegin()
        if self._on_message_begin:
            self._on_message_begin(name, type, seqid)
        if name not in self._processMap:
            # Consume the unread args struct so the transport stays in sync.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_GetAlbumByTitle(self, seqid, iprot, oprot):
        # Template followed by every handler below: decode args, call the
        # user handler, map declared service exceptions into the result
        # struct (REPLY), let transport errors propagate, and convert any
        # other exception into a TApplicationException (EXCEPTION).
        args = GetAlbumByTitle_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = GetAlbumByTitle_result()
        try:
            result.success = self._handler.GetAlbumByTitle(args.title)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except SpotifakeManagement.ttypes.SErrorInvalidRequestException as sErrorInvalidRequestE:
            msg_type = TMessageType.REPLY
            result.sErrorInvalidRequestE = sErrorInvalidRequestE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("GetAlbumByTitle", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_GetAlbumsByContentCreatorId(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = GetAlbumsByContentCreatorId_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = GetAlbumsByContentCreatorId_result()
        try:
            result.success = self._handler.GetAlbumsByContentCreatorId(args.idContentCreator)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("GetAlbumsByContentCreatorId", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_GetSinglesByContentCreatorId(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = GetSinglesByContentCreatorId_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = GetSinglesByContentCreatorId_result()
        try:
            result.success = self._handler.GetSinglesByContentCreatorId(args.idContentCreator)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("GetSinglesByContentCreatorId", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_GetAlbumByLibraryId(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = GetAlbumByLibraryId_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = GetAlbumByLibraryId_result()
        try:
            result.success = self._handler.GetAlbumByLibraryId(args.idLibrary)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("GetAlbumByLibraryId", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_AddAlbum(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = AddAlbum_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = AddAlbum_result()
        try:
            result.success = self._handler.AddAlbum(args.newAlbum, args.idContenCreator)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("AddAlbum", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_AddFeaturingAlbum(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = AddFeaturingAlbum_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = AddFeaturingAlbum_result()
        try:
            result.success = self._handler.AddFeaturingAlbum(args.idNewAlbum, args.idContenCreator)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("AddFeaturingAlbum", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_DeleteAlbum(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = DeleteAlbum_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = DeleteAlbum_result()
        try:
            result.success = self._handler.DeleteAlbum(args.idAlbum)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except SpotifakeManagement.ttypes.SErrorInvalidRequestException as sErrorInvalidRequestE:
            msg_type = TMessageType.REPLY
            result.sErrorInvalidRequestE = sErrorInvalidRequestE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("DeleteAlbum", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_UpdateAlbumTitle(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = UpdateAlbumTitle_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = UpdateAlbumTitle_result()
        try:
            result.success = self._handler.UpdateAlbumTitle(args.idAlbum, args.newAlbumTitle)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except SpotifakeManagement.ttypes.SErrorInvalidRequestException as sErrorInvalidRequestE:
            msg_type = TMessageType.REPLY
            result.sErrorInvalidRequestE = sErrorInvalidRequestE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("UpdateAlbumTitle", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_UpdateAlbumCover(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = UpdateAlbumCover_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = UpdateAlbumCover_result()
        try:
            result.success = self._handler.UpdateAlbumCover(args.idAlbum, args.newCoverStoragePath)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except SpotifakeManagement.ttypes.SErrorInvalidRequestException as sErrorInvalidRequestE:
            msg_type = TMessageType.REPLY
            result.sErrorInvalidRequestE = sErrorInvalidRequestE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("UpdateAlbumCover", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_AddAlbumToLibrary(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = AddAlbumToLibrary_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = AddAlbumToLibrary_result()
        try:
            result.success = self._handler.AddAlbumToLibrary(args.idLibrary, args.idAlbum)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("AddAlbumToLibrary", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_DeleteLibraryAlbum(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = DeleteLibraryAlbum_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = DeleteLibraryAlbum_result()
        try:
            result.success = self._handler.DeleteLibraryAlbum(args.idLibrary, args.idAlbum)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("DeleteLibraryAlbum", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_GetAlbumByQuery(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = GetAlbumByQuery_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = GetAlbumByQuery_result()
        try:
            result.success = self._handler.GetAlbumByQuery(args.query)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorNotFoundException as sErrorNotFoundE:
            msg_type = TMessageType.REPLY
            result.sErrorNotFoundE = sErrorNotFoundE
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("GetAlbumByQuery", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_AddImageToMedia(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = AddImageToMedia_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = AddImageToMedia_result()
        try:
            result.success = self._handler.AddImageToMedia(args.fileName, args.image)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("AddImageToMedia", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_GetImageToMedia(self, seqid, iprot, oprot):
        # Decode args, invoke handler, reply with result or mapped exception.
        args = GetImageToMedia_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = GetImageToMedia_result()
        try:
            result.success = self._handler.GetImageToMedia(args.fileName)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except SpotifakeManagement.ttypes.SErrorSystemException as sErrorSystemE:
            msg_type = TMessageType.REPLY
            result.sErrorSystemE = sErrorSystemE
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("GetImageToMedia", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class GetAlbumByTitle_args(object):
    """
    Request struct for GetAlbumByTitle.

    Attributes:
     - title
    """

    def __init__(self, title=None,):
        self.title = title

    def read(self, iprot):
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 reads bytes; decode to unicode there only.
                    self.title = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumByTitle_args')
        if self.title is not None:
            oprot.writeFieldBegin('title', TType.STRING, 1)
            oprot.writeString(self.title.encode('utf-8') if sys.version_info[0] == 2 else self.title)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct for fast binary (de)serialization and declare its wire
# layout: one slot per field id of (id, wire type, name, type-args, default).
all_structs.append(GetAlbumByTitle_args)
GetAlbumByTitle_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'title', 'UTF8', None, ),  # 1
)
class GetAlbumByTitle_result(object):
    """
    Reply struct for GetAlbumByTitle: exactly one of the fields is set —
    the success payload or one of the declared service exceptions.

    Attributes:
     - success
     - sErrorNotFoundE
     - sErrorSystemE
     - sErrorInvalidRequestE
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None, sErrorInvalidRequestE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE
        self.sErrorInvalidRequestE = sErrorInvalidRequestE

    def read(self, iprot):
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = SpotifakeManagement.ttypes.Album()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.sErrorInvalidRequestE = SpotifakeManagement.ttypes.SErrorInvalidRequestException()
                    self.sErrorInvalidRequestE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumByTitle_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorInvalidRequestE is not None:
            oprot.writeFieldBegin('sErrorInvalidRequestE', TType.STRUCT, 3)
            self.sErrorInvalidRequestE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct for fast binary (de)serialization and declare its wire
# layout: one slot per field id of (id, wire type, name, type-args, default).
all_structs.append(GetAlbumByTitle_result)
GetAlbumByTitle_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [SpotifakeManagement.ttypes.Album, None], None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
    (3, TType.STRUCT, 'sErrorInvalidRequestE', [SpotifakeManagement.ttypes.SErrorInvalidRequestException, None], None, ),  # 3
)
class GetAlbumsByContentCreatorId_args(object):
    """
    Request struct for GetAlbumsByContentCreatorId.

    Attributes:
     - idContentCreator
    """

    def __init__(self, idContentCreator=None,):
        self.idContentCreator = idContentCreator

    def read(self, iprot):
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idContentCreator = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumsByContentCreatorId_args')
        if self.idContentCreator is not None:
            oprot.writeFieldBegin('idContentCreator', TType.I16, 1)
            oprot.writeI16(self.idContentCreator)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct for fast binary (de)serialization and declare its wire
# layout: one slot per field id of (id, wire type, name, type-args, default).
all_structs.append(GetAlbumsByContentCreatorId_args)
GetAlbumsByContentCreatorId_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idContentCreator', None, None, ),  # 1
)
class GetAlbumsByContentCreatorId_result(object):
    """
    Reply struct for GetAlbumsByContentCreatorId: exactly one of the fields
    is set — the success list or one of the declared service exceptions.

    Attributes:
     - success
     - sErrorNotFoundE
     - sErrorSystemE
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # success is a list<Album>; read element count then each struct.
                    self.success = []
                    (_etype66, _size63) = iprot.readListBegin()
                    for _i67 in range(_size63):
                        _elem68 = SpotifakeManagement.ttypes.Album()
                        _elem68.read(iprot)
                        self.success.append(_elem68)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumsByContentCreatorId_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter69 in self.success:
                iter69.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct for fast binary (de)serialization and declare its wire
# layout: one slot per field id of (id, wire type, name, type-args, default).
all_structs.append(GetAlbumsByContentCreatorId_result)
GetAlbumsByContentCreatorId_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [SpotifakeManagement.ttypes.Album, None], False), None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
)
class GetSinglesByContentCreatorId_args(object):
    """
    Request struct for GetSinglesByContentCreatorId.

    Attributes:
     - idContentCreator
    """

    def __init__(self, idContentCreator=None,):
        self.idContentCreator = idContentCreator

    def read(self, iprot):
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idContentCreator = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetSinglesByContentCreatorId_args')
        if self.idContentCreator is not None:
            oprot.writeFieldBegin('idContentCreator', TType.I16, 1)
            oprot.writeI16(self.idContentCreator)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(GetSinglesByContentCreatorId_args)
GetSinglesByContentCreatorId_args.thrift_spec = (
None, # 0
(1, TType.I16, 'idContentCreator', None, None, ), # 1
)
class GetSinglesByContentCreatorId_result(object):
    """
    Thrift result struct for the GetSinglesByContentCreatorId RPC.

    Attributes:
     - success: list of SpotifakeManagement.ttypes.Album elements
       (NOTE(review): element type is Album even though the call fetches
       singles — this matches the generated spec; confirm against the IDL)
     - sErrorNotFoundE: SErrorNotFoundException exception slot
     - sErrorSystemE: SErrorSystemException exception slot
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype73, _size70) = iprot.readListBegin()
                    for _i74 in range(_size70):
                        _elem75 = SpotifakeManagement.ttypes.Album()
                        _elem75.read(iprot)
                        self.success.append(_elem75)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetSinglesByContentCreatorId_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter76 in self.success:
                iter76.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(GetSinglesByContentCreatorId_result)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
GetSinglesByContentCreatorId_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [SpotifakeManagement.ttypes.Album, None], False), None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
)
class GetAlbumByLibraryId_args(object):
    """
    Thrift argument struct for the GetAlbumByLibraryId RPC.

    Attributes:
     - idLibrary: i16 field (Thrift field id 1)
    """

    def __init__(self, idLibrary=None,):
        self.idLibrary = idLibrary

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idLibrary = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumByLibraryId_args')
        if self.idLibrary is not None:
            oprot.writeFieldBegin('idLibrary', TType.I16, 1)
            oprot.writeI16(self.idLibrary)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(GetAlbumByLibraryId_args)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
GetAlbumByLibraryId_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idLibrary', None, None, ),  # 1
)
class GetAlbumByLibraryId_result(object):
    """
    Thrift result struct for the GetAlbumByLibraryId RPC.

    Attributes:
     - success: list of SpotifakeManagement.ttypes.Album elements
     - sErrorNotFoundE: SErrorNotFoundException exception slot
     - sErrorSystemE: SErrorSystemException exception slot
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype80, _size77) = iprot.readListBegin()
                    for _i81 in range(_size77):
                        _elem82 = SpotifakeManagement.ttypes.Album()
                        _elem82.read(iprot)
                        self.success.append(_elem82)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumByLibraryId_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter83 in self.success:
                iter83.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(GetAlbumByLibraryId_result)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
GetAlbumByLibraryId_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [SpotifakeManagement.ttypes.Album, None], False), None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
)
class AddAlbum_args(object):
    """
    Thrift argument struct for the AddAlbum RPC.

    Attributes:
     - newAlbum: SpotifakeManagement.ttypes.Album struct (Thrift field id 1)
     - idContenCreator: i16 field (Thrift field id 2)
       (NOTE(review): name is misspelled in the IDL; do not rename here —
       it is part of the generated wire/field contract)
    """

    def __init__(self, newAlbum=None, idContenCreator=None,):
        self.newAlbum = newAlbum
        self.idContenCreator = idContenCreator

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.newAlbum = SpotifakeManagement.ttypes.Album()
                    self.newAlbum.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I16:
                    self.idContenCreator = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddAlbum_args')
        if self.newAlbum is not None:
            oprot.writeFieldBegin('newAlbum', TType.STRUCT, 1)
            self.newAlbum.write(oprot)
            oprot.writeFieldEnd()
        if self.idContenCreator is not None:
            oprot.writeFieldBegin('idContenCreator', TType.I16, 2)
            oprot.writeI16(self.idContenCreator)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(AddAlbum_args)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
AddAlbum_args.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'newAlbum', [SpotifakeManagement.ttypes.Album, None], None, ),  # 1
    (2, TType.I16, 'idContenCreator', None, None, ),  # 2
)
class AddAlbum_result(object):
    """
    Thrift result struct for the AddAlbum RPC.

    Attributes:
     - success: i16 return value (Thrift field id 0)
     - sErrorSystemE: SErrorSystemException exception slot
    """

    def __init__(self, success=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I16:
                    self.success = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddAlbum_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I16, 0)
            oprot.writeI16(self.success)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 1)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(AddAlbum_result)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
AddAlbum_result.thrift_spec = (
    (0, TType.I16, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 1
)
class AddFeaturingAlbum_args(object):
    """
    Thrift argument struct for the AddFeaturingAlbum RPC.

    Attributes:
     - idNewAlbum: i16 field (Thrift field id 1)
     - idContenCreator: i16 field (Thrift field id 2; spelling comes from the IDL)
    """

    def __init__(self, idNewAlbum=None, idContenCreator=None,):
        self.idNewAlbum = idNewAlbum
        self.idContenCreator = idContenCreator

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idNewAlbum = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I16:
                    self.idContenCreator = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddFeaturingAlbum_args')
        if self.idNewAlbum is not None:
            oprot.writeFieldBegin('idNewAlbum', TType.I16, 1)
            oprot.writeI16(self.idNewAlbum)
            oprot.writeFieldEnd()
        if self.idContenCreator is not None:
            oprot.writeFieldBegin('idContenCreator', TType.I16, 2)
            oprot.writeI16(self.idContenCreator)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(AddFeaturingAlbum_args)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
AddFeaturingAlbum_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idNewAlbum', None, None, ),  # 1
    (2, TType.I16, 'idContenCreator', None, None, ),  # 2
)
class AddFeaturingAlbum_result(object):
    """
    Thrift result struct for the AddFeaturingAlbum RPC.

    Attributes:
     - success: i16 return value (Thrift field id 0)
     - sErrorSystemE: SErrorSystemException exception slot
    """

    def __init__(self, success=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I16:
                    self.success = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddFeaturingAlbum_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I16, 0)
            oprot.writeI16(self.success)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 1)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(AddFeaturingAlbum_result)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
AddFeaturingAlbum_result.thrift_spec = (
    (0, TType.I16, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 1
)
class DeleteAlbum_args(object):
    """
    Thrift argument struct for the DeleteAlbum RPC.

    Attributes:
     - idAlbum: i16 field (Thrift field id 1)
    """

    def __init__(self, idAlbum=None,):
        self.idAlbum = idAlbum

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idAlbum = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DeleteAlbum_args')
        if self.idAlbum is not None:
            oprot.writeFieldBegin('idAlbum', TType.I16, 1)
            oprot.writeI16(self.idAlbum)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(DeleteAlbum_args)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
DeleteAlbum_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idAlbum', None, None, ),  # 1
)
class DeleteAlbum_result(object):
    """
    Thrift result struct for the DeleteAlbum RPC.

    Attributes:
     - success: i16 return value (Thrift field id 0)
     - sErrorNotFoundE: SErrorNotFoundException exception slot
     - sErrorSystemE: SErrorSystemException exception slot
     - sErrorInvalidRequestE: SErrorInvalidRequestException exception slot
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None, sErrorInvalidRequestE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE
        self.sErrorInvalidRequestE = sErrorInvalidRequestE

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I16:
                    self.success = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.sErrorInvalidRequestE = SpotifakeManagement.ttypes.SErrorInvalidRequestException()
                    self.sErrorInvalidRequestE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DeleteAlbum_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I16, 0)
            oprot.writeI16(self.success)
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorInvalidRequestE is not None:
            oprot.writeFieldBegin('sErrorInvalidRequestE', TType.STRUCT, 3)
            self.sErrorInvalidRequestE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(DeleteAlbum_result)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
DeleteAlbum_result.thrift_spec = (
    (0, TType.I16, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
    (3, TType.STRUCT, 'sErrorInvalidRequestE', [SpotifakeManagement.ttypes.SErrorInvalidRequestException, None], None, ),  # 3
)
class UpdateAlbumTitle_args(object):
    """
    Thrift argument struct for the UpdateAlbumTitle RPC.

    Attributes:
     - idAlbum: i16 field (Thrift field id 1)
     - newAlbumTitle: UTF-8 string field (Thrift field id 2)
    """

    def __init__(self, idAlbum=None, newAlbumTitle=None,):
        self.idAlbum = idAlbum
        self.newAlbumTitle = newAlbumTitle

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idAlbum = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    # Decode bytes to text on Python 2; Python 3 protocols return str already.
                    self.newAlbumTitle = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('UpdateAlbumTitle_args')
        if self.idAlbum is not None:
            oprot.writeFieldBegin('idAlbum', TType.I16, 1)
            oprot.writeI16(self.idAlbum)
            oprot.writeFieldEnd()
        if self.newAlbumTitle is not None:
            oprot.writeFieldBegin('newAlbumTitle', TType.STRING, 2)
            # Encode text to bytes on Python 2; Python 3 protocols accept str.
            oprot.writeString(self.newAlbumTitle.encode('utf-8') if sys.version_info[0] == 2 else self.newAlbumTitle)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(UpdateAlbumTitle_args)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
UpdateAlbumTitle_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idAlbum', None, None, ),  # 1
    (2, TType.STRING, 'newAlbumTitle', 'UTF8', None, ),  # 2
)
class UpdateAlbumTitle_result(object):
    """
    Thrift result struct for the UpdateAlbumTitle RPC.

    Attributes:
     - success: SpotifakeManagement.ttypes.Album struct (Thrift field id 0)
     - sErrorNotFoundE: SErrorNotFoundException exception slot
     - sErrorSystemE: SErrorSystemException exception slot
     - sErrorInvalidRequestE: SErrorInvalidRequestException exception slot
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None, sErrorInvalidRequestE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE
        self.sErrorInvalidRequestE = sErrorInvalidRequestE

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = SpotifakeManagement.ttypes.Album()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.sErrorInvalidRequestE = SpotifakeManagement.ttypes.SErrorInvalidRequestException()
                    self.sErrorInvalidRequestE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('UpdateAlbumTitle_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorInvalidRequestE is not None:
            oprot.writeFieldBegin('sErrorInvalidRequestE', TType.STRUCT, 3)
            self.sErrorInvalidRequestE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(UpdateAlbumTitle_result)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
UpdateAlbumTitle_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [SpotifakeManagement.ttypes.Album, None], None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
    (3, TType.STRUCT, 'sErrorInvalidRequestE', [SpotifakeManagement.ttypes.SErrorInvalidRequestException, None], None, ),  # 3
)
class UpdateAlbumCover_args(object):
    """
    Thrift argument struct for the UpdateAlbumCover RPC.

    Attributes:
     - idAlbum: i16 field (Thrift field id 1)
     - newCoverStoragePath: UTF-8 string field (Thrift field id 2)
    """

    def __init__(self, idAlbum=None, newCoverStoragePath=None,):
        self.idAlbum = idAlbum
        self.newCoverStoragePath = newCoverStoragePath

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idAlbum = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    # Decode bytes to text on Python 2; Python 3 protocols return str already.
                    self.newCoverStoragePath = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('UpdateAlbumCover_args')
        if self.idAlbum is not None:
            oprot.writeFieldBegin('idAlbum', TType.I16, 1)
            oprot.writeI16(self.idAlbum)
            oprot.writeFieldEnd()
        if self.newCoverStoragePath is not None:
            oprot.writeFieldBegin('newCoverStoragePath', TType.STRING, 2)
            # Encode text to bytes on Python 2; Python 3 protocols accept str.
            oprot.writeString(self.newCoverStoragePath.encode('utf-8') if sys.version_info[0] == 2 else self.newCoverStoragePath)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(UpdateAlbumCover_args)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
UpdateAlbumCover_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idAlbum', None, None, ),  # 1
    (2, TType.STRING, 'newCoverStoragePath', 'UTF8', None, ),  # 2
)
class UpdateAlbumCover_result(object):
    """
    Thrift result struct for the UpdateAlbumCover RPC.

    Attributes:
     - success: SpotifakeManagement.ttypes.Album struct (Thrift field id 0)
     - sErrorNotFoundE: SErrorNotFoundException exception slot
     - sErrorSystemE: SErrorSystemException exception slot
     - sErrorInvalidRequestE: SErrorInvalidRequestException exception slot
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None, sErrorInvalidRequestE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE
        self.sErrorInvalidRequestE = sErrorInvalidRequestE

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = SpotifakeManagement.ttypes.Album()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.sErrorInvalidRequestE = SpotifakeManagement.ttypes.SErrorInvalidRequestException()
                    self.sErrorInvalidRequestE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('UpdateAlbumCover_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorInvalidRequestE is not None:
            oprot.writeFieldBegin('sErrorInvalidRequestE', TType.STRUCT, 3)
            self.sErrorInvalidRequestE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(UpdateAlbumCover_result)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
UpdateAlbumCover_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [SpotifakeManagement.ttypes.Album, None], None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
    (3, TType.STRUCT, 'sErrorInvalidRequestE', [SpotifakeManagement.ttypes.SErrorInvalidRequestException, None], None, ),  # 3
)
class AddAlbumToLibrary_args(object):
    """
    Thrift argument struct for the AddAlbumToLibrary RPC.

    Attributes:
     - idLibrary: i16 field (Thrift field id 1)
     - idAlbum: i16 field (Thrift field id 2)
    """

    def __init__(self, idLibrary=None, idAlbum=None,):
        self.idLibrary = idLibrary
        self.idAlbum = idAlbum

    def read(self, iprot):
        """Deserialize this struct from ``iprot``; unrecognized fields are skipped."""
        # Fast path: C-accelerated decode driven by thrift_spec.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idLibrary = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I16:
                    self.idAlbum = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to ``oprot``; fields left as None are omitted."""
        # Fast path: C-accelerated encode driven by thrift_spec.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddAlbumToLibrary_args')
        if self.idLibrary is not None:
            oprot.writeFieldBegin('idLibrary', TType.I16, 1)
            oprot.writeI16(self.idLibrary)
            oprot.writeFieldEnd()
        if self.idAlbum is not None:
            oprot.writeFieldBegin('idAlbum', TType.I16, 2)
            oprot.writeI16(self.idAlbum)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(AddAlbumToLibrary_args)
# Field spec consumed by the accelerated protocol; tuple index == Thrift field id.
AddAlbumToLibrary_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idLibrary', None, None, ),  # 1
    (2, TType.I16, 'idAlbum', None, None, ),  # 2
)
class AddAlbumToLibrary_result(object):
    """
    Thrift-generated response struct for AddAlbumToLibrary: either a bool
    `success` or a declared service exception.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - success
     - sErrorSystemE
    """

    def __init__(self, success=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field with skip-on-mismatch.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; only non-None members are written.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddAlbumToLibrary_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 1)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(AddAlbumToLibrary_result)
AddAlbumToLibrary_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 1
)
class DeleteLibraryAlbum_args(object):
    """
    Thrift-generated request struct for the DeleteLibraryAlbum service call.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - idLibrary
     - idAlbum
    """

    def __init__(self, idLibrary=None, idAlbum=None,):
        self.idLibrary = idLibrary
        self.idAlbum = idAlbum

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field with skip-on-mismatch.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I16:
                    self.idLibrary = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I16:
                    self.idAlbum = iprot.readI16()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; only non-None members are written.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DeleteLibraryAlbum_args')
        if self.idLibrary is not None:
            oprot.writeFieldBegin('idLibrary', TType.I16, 1)
            oprot.writeI16(self.idLibrary)
            oprot.writeFieldEnd()
        if self.idAlbum is not None:
            oprot.writeFieldBegin('idAlbum', TType.I16, 2)
            oprot.writeI16(self.idAlbum)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(DeleteLibraryAlbum_args)
DeleteLibraryAlbum_args.thrift_spec = (
    None,  # 0
    (1, TType.I16, 'idLibrary', None, None, ),  # 1
    (2, TType.I16, 'idAlbum', None, None, ),  # 2
)
class DeleteLibraryAlbum_result(object):
    """
    Thrift-generated response struct for DeleteLibraryAlbum: an i16
    `success` value or one of the declared service exceptions.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - success
     - sErrorNotFoundE
     - sErrorSystemE
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field with skip-on-mismatch.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I16:
                    self.success = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; only non-None members are written.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('DeleteLibraryAlbum_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I16, 0)
            oprot.writeI16(self.success)
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(DeleteLibraryAlbum_result)
DeleteLibraryAlbum_result.thrift_spec = (
    (0, TType.I16, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
)
class GetAlbumByQuery_args(object):
    """
    Thrift-generated request struct for the GetAlbumByQuery service call.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - query
    """

    def __init__(self, query=None,):
        self.query = query

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field. The string is decoded to unicode on Python 2.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.query = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; the string is UTF-8 encoded on Python 2.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumByQuery_args')
        if self.query is not None:
            oprot.writeFieldBegin('query', TType.STRING, 1)
            oprot.writeString(self.query.encode('utf-8') if sys.version_info[0] == 2 else self.query)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(GetAlbumByQuery_args)
GetAlbumByQuery_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'query', 'UTF8', None, ),  # 1
)
class GetAlbumByQuery_result(object):
    """
    Thrift-generated response struct for GetAlbumByQuery: a list of Album
    structs on success, or one of the declared service exceptions.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - success
     - sErrorNotFoundE
     - sErrorSystemE
    """

    def __init__(self, success=None, sErrorNotFoundE=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorNotFoundE = sErrorNotFoundE
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field (list elements are read as Album structs).
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype87, _size84) = iprot.readListBegin()
                    for _i88 in range(_size84):
                        _elem89 = SpotifakeManagement.ttypes.Album()
                        _elem89.read(iprot)
                        self.success.append(_elem89)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorNotFoundE = SpotifakeManagement.ttypes.SErrorNotFoundException()
                    self.sErrorNotFoundE.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; only non-None members are written.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetAlbumByQuery_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter90 in self.success:
                iter90.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.sErrorNotFoundE is not None:
            oprot.writeFieldBegin('sErrorNotFoundE', TType.STRUCT, 1)
            self.sErrorNotFoundE.write(oprot)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 2)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(GetAlbumByQuery_result)
GetAlbumByQuery_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [SpotifakeManagement.ttypes.Album, None], False), None, ),  # 0
    (1, TType.STRUCT, 'sErrorNotFoundE', [SpotifakeManagement.ttypes.SErrorNotFoundException, None], None, ),  # 1
    (2, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 2
)
class AddImageToMedia_args(object):
    """
    Thrift-generated request struct for the AddImageToMedia service call.
    `image` is transported as raw binary; `fileName` as a UTF-8 string.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - fileName
     - image
    """

    def __init__(self, fileName=None, image=None,):
        self.fileName = fileName
        self.image = image

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field with skip-on-mismatch.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.fileName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    # Binary payload: no text decoding.
                    self.image = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; only non-None members are written.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddImageToMedia_args')
        if self.fileName is not None:
            oprot.writeFieldBegin('fileName', TType.STRING, 1)
            oprot.writeString(self.fileName.encode('utf-8') if sys.version_info[0] == 2 else self.fileName)
            oprot.writeFieldEnd()
        if self.image is not None:
            oprot.writeFieldBegin('image', TType.STRING, 2)
            oprot.writeBinary(self.image)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(AddImageToMedia_args)
AddImageToMedia_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'fileName', 'UTF8', None, ),  # 1
    (2, TType.STRING, 'image', 'BINARY', None, ),  # 2
)
class AddImageToMedia_result(object):
    """
    Thrift-generated response struct for AddImageToMedia: a bool `success`
    or a declared service exception.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - success
     - sErrorSystemE
    """

    def __init__(self, success=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field with skip-on-mismatch.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; only non-None members are written.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('AddImageToMedia_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 1)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(AddImageToMedia_result)
AddImageToMedia_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 1
)
class GetImageToMedia_args(object):
    """
    Thrift-generated request struct for the GetImageToMedia service call.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - fileName
    """

    def __init__(self, fileName=None,):
        self.fileName = fileName

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field. The string is decoded to unicode on Python 2.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.fileName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; the string is UTF-8 encoded on Python 2.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetImageToMedia_args')
        if self.fileName is not None:
            oprot.writeFieldBegin('fileName', TType.STRING, 1)
            oprot.writeString(self.fileName.encode('utf-8') if sys.version_info[0] == 2 else self.fileName)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(GetImageToMedia_args)
GetImageToMedia_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'fileName', 'UTF8', None, ),  # 1
)
class GetImageToMedia_result(object):
    """
    Thrift-generated response struct for GetImageToMedia: binary image
    bytes in `success`, or a declared service exception.
    (Auto-generated code; regenerate from the IDL instead of hand-editing.)

    Attributes:
     - success
     - sErrorSystemE
    """

    def __init__(self, success=None, sErrorSystemE=None,):
        self.success = success
        self.sErrorSystemE = sErrorSystemE

    def read(self, iprot):
        # Deserialize from iprot: fast C decoder when available, else
        # field-by-field (success is read as raw binary, not text).
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.sErrorSystemE = SpotifakeManagement.ttypes.SErrorSystemException()
                    self.sErrorSystemE.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to oprot; only non-None members are written.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('GetImageToMedia_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRING, 0)
            oprot.writeBinary(self.success)
            oprot.writeFieldEnd()
        if self.sErrorSystemE is not None:
            oprot.writeFieldBegin('sErrorSystemE', TType.STRUCT, 1)
            self.sErrorSystemE.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its wire spec for the fast C (de)serializer.
all_structs.append(GetImageToMedia_result)
GetImageToMedia_result.thrift_spec = (
    (0, TType.STRING, 'success', 'BINARY', None, ),  # 0
    (1, TType.STRUCT, 'sErrorSystemE', [SpotifakeManagement.ttypes.SErrorSystemException, None], None, ),  # 1
)
# Resolve the struct references inside every registered thrift_spec, then
# drop the registry list — it is only needed during module import.
fix_spec(all_structs)
del all_structs
| 35.448208
| 134
| 0.610199
| 12,089
| 126,621
| 6.202085
| 0.021921
| 0.014404
| 0.025928
| 0.021607
| 0.872894
| 0.850808
| 0.840831
| 0.831922
| 0.830401
| 0.830401
| 0
| 0.006208
| 0.299089
| 126,621
| 3,571
| 135
| 35.458135
| 0.838603
| 0.0506
| 0
| 0.843798
| 1
| 0
| 0.041712
| 0.007573
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103369
| false
| 0.00536
| 0.003063
| 0.032159
| 0.194104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1dca373c90483df08cab4e1663fc8d2126c87100
| 26
|
py
|
Python
|
main.py
|
RazdolbayOne/gui-mark-counter-of-ai-mark
|
7e5c065bbd9aa19f3ceec6c3f35e3cdcebf9078d
|
[
"MIT"
] | null | null | null |
main.py
|
RazdolbayOne/gui-mark-counter-of-ai-mark
|
7e5c065bbd9aa19f3ceec6c3f35e3cdcebf9078d
|
[
"MIT"
] | null | null | null |
main.py
|
RazdolbayOne/gui-mark-counter-of-ai-mark
|
7e5c065bbd9aa19f3ceec6c3f35e3cdcebf9078d
|
[
"MIT"
] | null | null | null |
import tkinter as tk
| 3.714286
| 20
| 0.653846
| 4
| 26
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.346154
| 26
| 6
| 21
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
69b704b9840d7e385e46b897e01c3a048fe02832
| 168
|
py
|
Python
|
utils/bson_encoding.py
|
Manu343726/biicode-common
|
91b32c6fd1e4a72ce5451183f1766d313cd0e420
|
[
"MIT"
] | 17
|
2015-04-15T09:40:23.000Z
|
2017-05-17T20:34:49.000Z
|
utils/bson_encoding.py
|
Manu343726/biicode-common
|
91b32c6fd1e4a72ce5451183f1766d313cd0e420
|
[
"MIT"
] | 2
|
2015-04-22T11:29:36.000Z
|
2018-09-25T09:31:09.000Z
|
utils/bson_encoding.py
|
bowlofstew/common
|
45e9ca902be7bbbdd73dafe3ab8957bc4a006020
|
[
"MIT"
] | 22
|
2015-04-15T09:46:00.000Z
|
2020-09-29T17:03:31.000Z
|
'''
Bson encode and decode
'''
from bson import BSON
def decode_bson(data):
    """Deserialize raw BSON bytes in *data* into a Python document."""
    return BSON(data).decode()
def encode_bson(data):
    """Serialize the mapping *data* to raw BSON bytes."""
    encoded = BSON.encode(data)
    return encoded
| 12
| 34
| 0.696429
| 25
| 168
| 4.6
| 0.36
| 0.208696
| 0.243478
| 0.313043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184524
| 168
| 13
| 35
| 12.923077
| 0.839416
| 0.130952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
69cedb9ccaa4b0eca75ae7dbc07045a7ed4e5255
| 5,355
|
py
|
Python
|
docs/theory/figures/potential.py
|
wolfv/ElastoPlasticQPot
|
7753c6cfb34d4bc79bc7ef07738a0dd1046222eb
|
[
"MIT"
] | null | null | null |
docs/theory/figures/potential.py
|
wolfv/ElastoPlasticQPot
|
7753c6cfb34d4bc79bc7ef07738a0dd1046222eb
|
[
"MIT"
] | null | null | null |
docs/theory/figures/potential.py
|
wolfv/ElastoPlasticQPot
|
7753c6cfb34d4bc79bc7ef07738a0dd1046222eb
|
[
"MIT"
] | null | null | null |
# Generate the potential-energy figures for the theory documentation:
# the hydrostatic potential U(eps_m), the elastic deviatoric potential
# V(eps_eq), and the piece-wise (plastic) potentials, their derivatives,
# and the resulting equivalent stress — each saved as an SVG.
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

plt.style.use(['goose', 'goose-latex', 'goose-tick-lower'])

# Well boundaries along eps_eq for the piece-wise (plastic) potential.
WELLS = [-1., 1., 1.5, 3., 6., 10.1]

# Shared axis labels (LaTeX).
XLAB_M = r'$\varepsilon_\mathrm{m}$'
XLAB_EQ = r'$\varepsilon_\mathrm{eq}$'


def _save_plot(x, y, xlabel, ylabel, fname, yticks=()):
    """Plot y(x) as a black line with the shared tick style and save to *fname*."""
    fig, ax = plt.subplots()
    ax.plot(x, y, color='k')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    ax.xaxis.set_ticks([0])
    ax.yaxis.set_ticks(list(yticks))
    plt.savefig(fname)


def _piecewise(eps_eq, fval):
    """Assemble a piece-wise curve over the wells in WELLS.

    For each well [a0, a1) with centre abar and half-width delta,
    evaluate fval(abar, delta) (an array over all of eps_eq) and keep
    only the samples that fall inside that well; samples outside every
    well stay zero.
    """
    out = np.zeros(eps_eq.shape)
    for a0, a1 in zip(WELLS[:-1], WELLS[1:]):
        abar = (a1 + a0) / 2.
        delta = (a1 - a0) / 2.
        idx = np.where((eps_eq >= a0) * (eps_eq < a1))[0]
        vals = fval(abar, delta)
        out[idx] = vals[idx]
    return out


# -- hydrostatic potential U(eps_m) = 9/2 eps_m^2 --
eps_m = np.linspace(-1.1, +1.1, 1000)
_save_plot(eps_m, 9. / 2. * eps_m ** 2.,
           XLAB_M, r'$U ( \varepsilon_\mathrm{m} ) $',
           'potential_U_raw.svg')

# -- elastic deviatoric potential V(eps_eq) = 3/2 eps_eq^2 --
eps_eq = np.linspace(0, 10, 1000)
_save_plot(eps_eq, 3. / 2. * eps_eq ** 2.,
           XLAB_EQ, r'$V ( \varepsilon_\mathrm{eq} ) $',
           'potential_V-elas_raw.svg')

# -- plastic potential: parabolic wells --
V = _piecewise(eps_eq, lambda abar, delta: .5 * ((eps_eq - abar) ** 2. - delta ** 2.))
_save_plot(eps_eq, V, XLAB_EQ, r'$V ( \varepsilon_\mathrm{eq} ) $',
           'potential_V-plas_raw.svg')

# -- plastic potential: smooth (cosine) wells --
V = _piecewise(eps_eq, lambda abar, delta:
               -(delta / (np.pi)) ** 2. * (1. + np.cos(np.pi * (eps_eq - abar) / delta)))
_save_plot(eps_eq, V, XLAB_EQ, r'$V ( \varepsilon_\mathrm{eq} ) $',
           'potential_V-plas-smooth_raw.svg')

# -- derivative dV/deps_eq: parabolic wells --
dV = _piecewise(eps_eq, lambda abar, delta: eps_eq - abar)
_save_plot(eps_eq, dV, XLAB_EQ, r'$\partial V / \partial \varepsilon_\mathrm{eq}$',
           'potential_dV-plas_raw.svg', yticks=[0])

# -- derivative dV/deps_eq: smooth wells --
dV_smooth = _piecewise(eps_eq, lambda abar, delta:
                       (delta / (np.pi)) * np.sin(np.pi * (eps_eq - abar) / delta))
_save_plot(eps_eq, dV_smooth, XLAB_EQ, r'$\partial V / \partial \varepsilon_\mathrm{eq}$',
           'potential_dV-plas-smooth_raw.svg', yticks=[0])

# -- equivalent stress: |dV/deps_eq| (zeros outside the wells are unchanged) --
_save_plot(eps_eq, np.abs(dV), XLAB_EQ, r'$\sigma_\mathrm{eq}$',
           'potential_sigeq-plas_raw.svg', yticks=[0])

_save_plot(eps_eq, np.abs(dV_smooth), XLAB_EQ, r'$\sigma_\mathrm{eq}$',
           'potential_sigeq-plas-smooth_raw.svg', yticks=[0])
| 20.207547
| 100
| 0.457143
| 766
| 5,355
| 3.062663
| 0.096606
| 0.08312
| 0.046036
| 0.054561
| 0.906223
| 0.906223
| 0.898551
| 0.88491
| 0.873402
| 0.873402
| 0
| 0.048904
| 0.190476
| 5,355
| 264
| 101
| 20.284091
| 0.492272
| 0.1662
| 0
| 0.806667
| 0
| 0
| 0.16124
| 0.120593
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02
| 0
| 0.02
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
69e9217e831bca8d96cfc913059c4f8df2d1ea89
| 221
|
py
|
Python
|
aiogram_tools/__init__.py
|
LDmitriy7/aiogram-tools
|
e1d00b9707b85930522e81188875d737b7e67f02
|
[
"MIT"
] | 1
|
2021-09-05T14:46:40.000Z
|
2021-09-05T14:46:40.000Z
|
aiogram_tools/__init__.py
|
LDmitriy7/aiogram-tools
|
e1d00b9707b85930522e81188875d737b7e67f02
|
[
"MIT"
] | null | null | null |
aiogram_tools/__init__.py
|
LDmitriy7/aiogram-tools
|
e1d00b9707b85930522e81188875d737b7e67f02
|
[
"MIT"
] | null | null | null |
from aiogram_tools.dispatcher import Dispatcher
from aiogram_tools import filters
from aiogram_tools import middlewares
from aiogram_tools import keyboards
__all__ = ['Dispatcher', 'filters', 'middlewares', 'keyboards']
| 31.571429
| 63
| 0.828054
| 26
| 221
| 6.730769
| 0.346154
| 0.251429
| 0.365714
| 0.377143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104072
| 221
| 6
| 64
| 36.833333
| 0.883838
| 0
| 0
| 0
| 0
| 0
| 0.167421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3861533bf34080e660c904e79461bbefea3323bc
| 700
|
py
|
Python
|
src/libs/faker/providers/phone_number/en_NZ/__init__.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
src/libs/faker/providers/phone_number/en_NZ/__init__.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
src/libs/faker/providers/phone_number/en_NZ/__init__.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
class Provider(PhoneNumberProvider):
    """Faker phone-number provider for the en_NZ locale.

    `{{area_code}}` placeholders in the formats are expanded by the
    generator; `%` and `#` placeholders are then filled with random
    digits by `numerify`.
    """

    # Format templates, from bare local numbers to fully-qualified +64 forms.
    formats = ("%## ####", "%##-####", "%######", "0{{area_code}} %## ####", "0{{area_code}} %##-####", "0{{area_code}}-%##-####", "0{{area_code}} %######", "(0{{area_code}}) %## ####", "(0{{area_code}}) %##-####", "(0{{area_code}}) %######", "+64 {{area_code}} %## ####", "+64 {{area_code}} %##-####", "+64 {{area_code}} %######", "+64-{{area_code}}-%##-####", "+64{{area_code}}%######")

    # Candidate values for the {{area_code}} placeholder.
    area_codes = ["20", "21", "22", "27", "29", "3", "4", "6", "7", "9"]

    def area_code(self):
        # Pick one area code at random; numerify fills any digit placeholders.
        return self.numerify(self.random_element(self.area_codes))

    def phone_number(self):
        # Pick a format, expand {{area_code}} via the generator, then fill digits.
        pattern = self.random_element(self.formats)
        return self.numerify(self.generator.parse(pattern))
| 87.5
| 385
| 0.504286
| 82
| 700
| 4.085366
| 0.365854
| 0.310448
| 0.18806
| 0.179104
| 0.337313
| 0.337313
| 0.337313
| 0.337313
| 0.337313
| 0.337313
| 0
| 0.050553
| 0.095714
| 700
| 8
| 386
| 87.5
| 0.478673
| 0
| 0
| 0
| 0
| 0
| 0.46933
| 0.10271
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
38bacb78603f04f9509e7ff89664f399106cf9df
| 188
|
py
|
Python
|
apps/stt_tests/serializers/__init__.py
|
michaldomino/Voice-interface-optimization-server
|
fff59d4c5db599e35d4b5f3915bbb272d2000a26
|
[
"MIT"
] | null | null | null |
apps/stt_tests/serializers/__init__.py
|
michaldomino/Voice-interface-optimization-server
|
fff59d4c5db599e35d4b5f3915bbb272d2000a26
|
[
"MIT"
] | null | null | null |
apps/stt_tests/serializers/__init__.py
|
michaldomino/Voice-interface-optimization-server
|
fff59d4c5db599e35d4b5f3915bbb272d2000a26
|
[
"MIT"
] | null | null | null |
from .stt_test_result_serializer import SttTestResultSerializer
from .stt_test_result_xlsx_serializer import SttTestResultXlsxSerializer
from .stt_test_serializer import SttTestSerializer
| 47
| 72
| 0.920213
| 21
| 188
| 7.809524
| 0.47619
| 0.128049
| 0.20122
| 0.207317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 188
| 3
| 73
| 62.666667
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2a06456679e6324d1b7361b62707ed362db71b43
| 10,441
|
py
|
Python
|
tests/integrationv2/test_client_authentication.py
|
aicas/s2n-tls
|
1bfdf86b7b4210bc6f67ef4c075502c8d8968f05
|
[
"Apache-2.0"
] | 1
|
2020-02-03T08:21:41.000Z
|
2020-02-03T08:21:41.000Z
|
tests/integrationv2/test_client_authentication.py
|
aicas/s2n-tls
|
1bfdf86b7b4210bc6f67ef4c075502c8d8968f05
|
[
"Apache-2.0"
] | null | null | null |
tests/integrationv2/test_client_authentication.py
|
aicas/s2n-tls
|
1bfdf86b7b4210bc6f67ef4c075502c8d8968f05
|
[
"Apache-2.0"
] | 1
|
2022-01-01T07:28:19.000Z
|
2022-01-01T07:28:19.000Z
|
import copy
import os
import pytest
import time
from configuration import (available_ports, ALL_TEST_CIPHERS, ALL_TEST_CURVES,
ALL_TEST_CERTS, PROTOCOLS)
from common import Certificates, ProviderOptions, Protocols, data_bytes
from fixtures import managed_process
from providers import Provider, S2N, OpenSSL
from utils import invalid_test_parameters, get_parameter_name, get_expected_s2n_version
# If we test every available cert, the test takes too long.
# Choose a good representative subset.
# Mix of RSA, ECDSA, and RSA-PSS certs at varying key/digest sizes; order is
# stable because it feeds the pytest parametrize ids.
CERTS_TO_TEST = [
    Certificates.RSA_1024_SHA256,
    Certificates.RSA_4096_SHA512,
    Certificates.ECDSA_256,
    Certificates.ECDSA_384,
    Certificates.RSA_PSS_2048_SHA256,
]
def assert_openssl_handshake_complete(results, is_complete=True):
    """Assert on OpenSSL's stderr evidence of handshake completion.

    A completed handshake logs both 'read finished' and 'write finished';
    an incomplete one is missing at least one of the two markers.
    """
    saw_read = b'read finished' in results.stderr
    saw_write = b'write finished' in results.stderr
    if is_complete:
        assert saw_read
        assert saw_write
    else:
        # Equivalent to: not read-finished OR not write-finished.
        assert not (saw_read and saw_write)
def assert_s2n_handshake_complete(results, protocol, provider, is_complete=True):
    """Assert on s2n's stdout evidence of handshake completion.

    s2n prints "Actual protocol version: <n>" once negotiation succeeds, so
    presence (or absence) of that line with the expected version tells us
    whether the handshake completed at the expected protocol version.

    :param results: provider results with a ``stdout`` bytes attribute
    :param protocol: the protocol the test negotiated
    :param provider: the peer provider (affects the expected version)
    :param is_complete: expect success (True) or failure (False)
    """
    expected_version = get_expected_s2n_version(protocol, provider)
    # Build the marker once; str.encode() already returns bytes, so the
    # original bytes(...) wrapper was redundant.
    marker = "Actual protocol version: {}".format(expected_version).encode('utf-8')
    if is_complete:
        assert marker in results.stdout
    else:
        assert marker not in results.stdout
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", CERTS_TO_TEST, ids=get_parameter_name)
@pytest.mark.parametrize("client_certificate", CERTS_TO_TEST, ids=get_parameter_name)
def test_client_auth_with_s2n_server(managed_process, cipher, provider, protocol, certificate, client_certificate):
    """Mutual TLS happy path: an OpenSSL client presenting a certificate
    completes a handshake with an s2n server that trusts that certificate."""
    port = next(available_ports)

    # ECDSA client certs only work from TLS1.2 onward.
    if protocol < Protocols.TLS12 and client_certificate.algorithm == 'EC':
        pytest.xfail("Client auth with ECDSA certs is currently broken for versions < TLS1.2")

    payload = data_bytes(64)
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        data_to_send=payload,
        use_client_auth=True,
        key=client_certificate.key,
        cert=client_certificate.cert,
        trust_store=certificate.cert,
        insecure=False,
        protocol=protocol)

    # The server mirrors the client options with the key/cert roles swapped.
    server_options = copy.copy(client_options)
    server_options.mode = Provider.ServerMode
    server_options.data_to_send = None
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    server_options.trust_store = client_certificate.cert

    server = managed_process(S2N, server_options, timeout=5)
    client = managed_process(provider, client_options, timeout=5)

    # Openssl should send a client certificate and complete the handshake
    for results in client.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        assert b'write client certificate' in results.stderr
        assert b'write certificate verify' in results.stderr
        assert_openssl_handshake_complete(results)

    # S2N should successfully connect
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        assert_s2n_handshake_complete(results, protocol, provider)
        assert payload in results.stdout
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", CERTS_TO_TEST, ids=get_parameter_name)
@pytest.mark.parametrize("client_certificate", CERTS_TO_TEST, ids=get_parameter_name)
def test_client_auth_with_s2n_server_using_nonmatching_certs(managed_process, cipher, provider, protocol, certificate, client_certificate):
    """Mutual TLS failure path: the s2n server is told to trust a cert other
    than the one the OpenSSL client presents, so the handshake must fail."""
    port = next(available_ports)

    # ECDSA client certs only work from TLS1.2 onward.
    # Typo fixed: "is current broken" -> "is currently broken" (matches the
    # sibling tests' message).
    if protocol < Protocols.TLS12 and client_certificate.algorithm == 'EC':
        pytest.xfail("Client auth with ECDSA certs is currently broken for versions < TLS1.2")

    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        data_to_send=b'',
        use_client_auth=True,
        key=client_certificate.key,
        cert=client_certificate.cert,
        trust_store=certificate.cert,
        insecure=False,
        protocol=protocol)

    server_options = copy.copy(client_options)
    server_options.data_to_send = None
    server_options.mode = Provider.ServerMode
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    # Tell the server to expect the wrong certificate
    server_options.trust_store = Certificates.RSA_2048_SHA256_WILDCARD.cert

    server = managed_process(S2N, server_options, timeout=5)
    # Use the parametrized provider (consistent with the other tests in this
    # file); it is currently always OpenSSL, so behavior is unchanged.
    client = managed_process(provider, client_options, timeout=5)

    # Openssl should tell us that a certificate was sent, but the handshake did not complete
    for results in client.get_results():
        assert results.exception is None
        assert b'write client certificate' in results.stderr
        assert b'write certificate verify' in results.stderr
        # TLS1.3 OpenSSL fails after the handshake, but pre-TLS1.3 fails during
        if protocol is not Protocols.TLS13:
            assert results.exit_code != 0
            assert_openssl_handshake_complete(results, False)

    # S2N should tell us that mutual authentication failed due to an untrusted cert
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code != 0
        assert b'Certificate is untrusted' in results.stderr
        assert b'Error: Mutual Auth was required, but not negotiated' in results.stderr
        assert_s2n_handshake_complete(results, protocol, provider, False)
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", CERTS_TO_TEST, ids=get_parameter_name)
def test_client_auth_with_s2n_client_no_cert(managed_process, cipher, protocol, provider, certificate):
    """Mutual TLS failure path: an s2n client with no certificate configured
    cannot satisfy an OpenSSL server that requires client auth."""
    port = next(available_ports)
    payload = data_bytes(64)

    # Note: no key/cert on the client side, only a trust store.
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        data_to_send=payload,
        use_client_auth=True,
        trust_store=certificate.cert,
        insecure=False,
        protocol=protocol)

    server_options = copy.copy(client_options)
    server_options.mode = Provider.ServerMode
    server_options.data_to_send = None
    server_options.key = certificate.key
    server_options.cert = certificate.cert

    server = managed_process(provider, server_options, timeout=5)
    client = managed_process(S2N, client_options, timeout=5)

    # Openssl should tell us that a cert was requested but not received
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        assert b'write certificate request' in results.stderr
        assert b'read client certificate' not in results.stderr
        assert b"peer did not return a certificate" in results.stderr
        assert_openssl_handshake_complete(results, False)

    for results in client.get_results():
        assert results.exception is None
        # TLS1.3 OpenSSL fails after the handshake, but pre-TLS1.3 fails during
        if protocol is not Protocols.TLS13:
            assert results.exit_code != 0
            assert b"Failed to negotiate: 'TLS alert received'" in results.stderr
            assert_s2n_handshake_complete(results, protocol, provider, False)
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("provider", [OpenSSL], ids=get_parameter_name)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", CERTS_TO_TEST, ids=get_parameter_name)
@pytest.mark.parametrize("client_certificate", CERTS_TO_TEST, ids=get_parameter_name)
def test_client_auth_with_s2n_client_with_cert(managed_process, cipher, protocol, provider, certificate, client_certificate):
    """Mutual TLS happy path: an s2n client presenting a certificate
    completes a handshake with an OpenSSL server that trusts it."""
    port = next(available_ports)

    # ECDSA client certs only work from TLS1.2 onward.
    if protocol < Protocols.TLS12 and client_certificate.algorithm == 'EC':
        pytest.xfail("Client auth with ECDSA certs is currently broken for versions < TLS1.2")

    random_bytes = data_bytes(64)
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host="localhost",
        port=port,
        cipher=cipher,
        data_to_send=random_bytes,
        use_client_auth=True,
        key=client_certificate.key,
        cert=client_certificate.cert,
        trust_store=certificate.cert,
        insecure=False,
        protocol=protocol)

    server_options = copy.copy(client_options)
    server_options.mode = Provider.ServerMode
    server_options.data_to_send = None
    server_options.key = certificate.key
    server_options.cert = certificate.cert
    server_options.trust_store = client_certificate.cert

    server = managed_process(provider, server_options, timeout=5)
    client = managed_process(S2N, client_options, timeout=5)

    # The client should connect and return without error
    for results in client.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        assert_s2n_handshake_complete(results, protocol, provider)

    # Openssl should indicate the certificate was successfully received.
    for results in server.get_results():
        assert results.exception is None
        assert results.exit_code == 0
        # NOTE(review): sibling tests check the full payload; the [1:] slice
        # here looks deliberate (first byte possibly consumed elsewhere) —
        # confirm before "fixing".
        assert random_bytes[1:] in results.stdout
        assert b'read client certificate' in results.stderr
        assert b'read certificate verify' in results.stderr
        assert_openssl_handshake_complete(results)
| 42.443089
| 139
| 0.743703
| 1,333
| 10,441
| 5.6009
| 0.129032
| 0.047013
| 0.042861
| 0.048353
| 0.818511
| 0.789579
| 0.771899
| 0.74779
| 0.740825
| 0.740825
| 0
| 0.011773
| 0.178335
| 10,441
| 245
| 140
| 42.616327
| 0.858492
| 0.070012
| 0
| 0.768041
| 0
| 0
| 0.09209
| 0
| 0
| 0
| 0
| 0
| 0.231959
| 1
| 0.030928
| false
| 0
| 0.046392
| 0
| 0.07732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2a2d5a856ba7153a0d3db72cd349fa84695856af
| 158,369
|
py
|
Python
|
pyboto3/clouddirectory.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/clouddirectory.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/clouddirectory.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_facet_to_object(DirectoryArn=None, SchemaFacet=None, ObjectAttributeList=None, ObjectReference=None):
    """Adds a new Facet to an object.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] ARN of the Directory where the object resides.
    :type SchemaFacet: dict
    :param SchemaFacet: [REQUIRED] Identifiers for the facet being added:
        SchemaArn (string) and FacetName (string).
    :type ObjectAttributeList: list
    :param ObjectAttributeList: Attributes on the facet to set on the object.
        Each entry is a dict with Key {SchemaArn, FacetName, Name} and Value
        (one of StringValue, BinaryValue, BooleanValue, NumberValue,
        DatetimeValue).
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED] Reference to the target object, as a
        Selector string: an $ObjectIdentifier, a /path, or a
        #BatchReferenceName.
    :rtype: dict
    :return: {}
    """
    pass
def apply_schema(PublishedSchemaArn=None, DirectoryArn=None):
    """Copies the input published schema into the Directory with the same
    name and version as that of the published schema.

    See also: AWS API Documentation.

    :type PublishedSchemaArn: string
    :param PublishedSchemaArn: [REQUIRED] ARN of the published schema to copy.
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] ARN of the Directory into which the schema
        is copied.
    :rtype: dict
    :return: {'AppliedSchemaArn': 'string', 'DirectoryArn': 'string'}
    """
    pass
def attach_object(DirectoryArn=None, ParentReference=None, ChildReference=None, LinkName=None):
    """Attaches an existing object to another object under a named link.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] ARN of the Directory where both objects reside.
    :type ParentReference: dict
    :param ParentReference: [REQUIRED] Reference to the parent object, as a
        Selector string ($ObjectIdentifier, /path, or #BatchReferenceName).
    :type ChildReference: dict
    :param ChildReference: [REQUIRED] Reference to the child object to attach,
        same Selector forms as ParentReference.
    :type LinkName: string
    :param LinkName: [REQUIRED] Link name with which the child is attached to
        the parent.
    :rtype: dict
    :return: {'AttachedObjectIdentifier': 'string'}
    """
    pass
def attach_policy(DirectoryArn=None, PolicyReference=None, ObjectReference=None):
    """Attaches a policy object to a regular object. An object can have a
    limited number of attached policies.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: ARN of the Directory where both objects reside.
    :type PolicyReference: dict
    :param PolicyReference: [REQUIRED] Reference to the policy object, as a
        Selector string ($ObjectIdentifier, /path, or #BatchReferenceName).
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED] Reference to the object the policy will
        be attached to, same Selector forms.
    :rtype: dict
    :return: {}
    """
    pass
def attach_to_index(DirectoryArn=None, IndexReference=None, TargetReference=None):
    """Attaches the specified object to the specified index.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] ARN of the directory where the object and
        index exist.
    :type IndexReference: dict
    :param IndexReference: [REQUIRED] Reference to the index, as a Selector
        string ($ObjectIdentifier, /path, or #BatchReferenceName).
    :type TargetReference: dict
    :param TargetReference: [REQUIRED] Reference to the object being attached
        to the index, same Selector forms.
    :rtype: dict
    :return: {'AttachedObjectIdentifier': 'string'}
    """
    pass
def attach_typed_link(DirectoryArn=None, SourceObjectReference=None, TargetObjectReference=None, TypedLinkFacet=None, Attributes=None):
    """Attaches a typed link to a specified source and target object.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] ARN of the directory where the typed link
        is attached.
    :type SourceObjectReference: dict
    :param SourceObjectReference: [REQUIRED] Reference to the source object,
        as a Selector string ($ObjectIdentifier, /path, or
        #BatchReferenceName).
    :type TargetObjectReference: dict
    :param TargetObjectReference: [REQUIRED] Reference to the target object,
        same Selector forms.
    :type TypedLinkFacet: dict
    :param TypedLinkFacet: [REQUIRED] Identifies the typed link facet:
        SchemaArn (string) and TypedLinkName (string).
    :type Attributes: list
    :param Attributes: [REQUIRED] Ordered set of typed-link attributes; each
        entry is a dict with AttributeName and Value (one of StringValue,
        BinaryValue, BooleanValue, NumberValue, DatetimeValue).
    :rtype: dict
    :return: {'TypedLinkSpecifier': {'TypedLinkFacet': {...},
        'SourceObjectReference': {...}, 'TargetObjectReference': {...},
        'IdentityAttributeValues': [...]}}
    """
    pass
def batch_read(DirectoryArn=None, Operations=None, ConsistencyLevel=None):
    """Performs all the read operations in a batch.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] ARN of the Directory.
    :type Operations: list
    :param Operations: [REQUIRED] Read operations that are part of the batch.
        Each entry may hold a ListObjectAttributes request (ObjectReference,
        NextToken, MaxResults, FacetFilter) or a ListObjectChildren request
        (ObjectReference, NextToken, MaxResults). Object references use a
        Selector string ($ObjectIdentifier, /path, or #BatchReferenceName).
    :type ConsistencyLevel: string
    :param ConsistencyLevel: 'SERIALIZABLE'|'EVENTUAL' — read consistency
        relative to preceding writes.
    :rtype: dict
    :return: {'Responses': [{'SuccessfulResponse': {...},
        'ExceptionResponse': {'Type': ..., 'Message': 'string'}}, ...]}
    """
    pass
def batch_write(DirectoryArn=None, Operations=None):
    """Perform all the write operations in a batch.

    Either every operation in the batch succeeds or none of them do.
    Batch writes support only object-related operations: CreateObject,
    AttachObject, DetachObject, UpdateObjectAttributes, DeleteObject,
    AddFacetToObject, and RemoveFacetFromObject.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The Amazon Resource Name (ARN) that is
        associated with the Directory. For more information, see arns.
    :type Operations: list
    :param Operations: [REQUIRED] A list of operations that are part of the
        batch. Each list element is a dict keyed by exactly one operation
        name (e.g. 'CreateObject', 'AttachObject', 'DetachObject',
        'UpdateObjectAttributes', 'DeleteObject', 'AddFacetToObject',
        'RemoveFacetFromObject') whose value carries that operation's
        arguments, including ObjectReference selectors
        ($ObjectIdentifier, /some/path, or #SomeBatchReference).
    :rtype: dict
    :return: {'Responses': [...]} with one entry per submitted operation;
        entries such as 'CreateObject' carry an 'ObjectIdentifier',
        'AttachObject' an 'attachedObjectIdentifier', and so on.
    """
    return None
def can_paginate(operation_name=None):
    """Check whether an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method
        name on the client. For example, if the method name is
        create_foo and you would normally invoke the operation as
        client.create_foo(**kwargs), then a paginatable create_foo can
        be driven via client.get_paginator('create_foo').
    """
    return None
def create_directory(Name=None, SchemaArn=None):
    """Create a Directory by copying the published schema into it.

    A directory cannot be created without a schema.

    See also: AWS API Documentation.

    :type Name: string
    :param Name: [REQUIRED] The name of the Directory. Should be unique
        per account, per region.
    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        published schema that will be copied into the data Directory.
        For more information, see arns.
    :rtype: dict
    :return: {'DirectoryArn': 'string', 'Name': 'string',
        'ObjectIdentifier': 'string', 'AppliedSchemaArn': 'string'}
    """
    return None
def create_facet(SchemaArn=None, Name=None, Attributes=None, ObjectType=None):
    """Create a new Facet in a schema.

    Facet creation is allowed only in development or applied schemas.

    See also: AWS API Documentation.

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The schema ARN in which the new Facet
        will be created. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED] The name of the Facet, unique within a
        given schema.
    :type Attributes: list
    :param Attributes: The attributes associated with the Facet. Each
        element is a dict with a required 'Name' plus either an
        'AttributeDefinition' (Type of
        'STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME', optional
        DefaultValue, IsImmutable flag, and validation Rules) or an
        'AttributeReference' (TargetFacetName/TargetAttributeName), and
        an optional 'RequiredBehavior' of
        'REQUIRED_ALWAYS'|'NOT_REQUIRED'.
    :type ObjectType: string
    :param ObjectType: [REQUIRED] Whether objects created from this
        facet are of type 'NODE' (multiple children, one parent),
        'LEAF_NODE' (no children, multiple parents), 'POLICY' (stores a
        policy document and type), or 'INDEX' (created with the Index
        API).
    :rtype: dict
    :return: {}
    """
    return None
def create_index(DirectoryArn=None, OrderedIndexedAttributeList=None,
                 IsUnique=None, ParentReference=None, LinkName=None):
    """Create an index object. See Indexing for more information.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory where the
        index should be created.
    :type OrderedIndexedAttributeList: list
    :param OrderedIndexedAttributeList: [REQUIRED] The attributes to
        index on; currently only a single attribute is supported. Each
        element is a dict with required 'SchemaArn', 'FacetName', and
        'Name' keys identifying the attribute.
    :type IsUnique: boolean
    :param IsUnique: [REQUIRED] Whether the indexed attribute has
        unique values.
    :type ParentReference: dict
    :param ParentReference: A reference ({'Selector': ...}) to the
        parent object that contains the index object. The selector is
        an $ObjectIdentifier, a /path, or a #BatchReference.
    :type LinkName: string
    :param LinkName: The name of the link between the parent object and
        the index object.
    :rtype: dict
    :return: {'ObjectIdentifier': 'string'}
    """
    return None
def create_object(DirectoryArn=None, SchemaFacets=None,
                  ObjectAttributeList=None, ParentReference=None,
                  LinkName=None):
    """Create an object in a Directory.

    If a parent reference and LinkName are specified, the object is
    also attached to that parent. An object is a collection of Facet
    attributes; creating an object from a policy facet yields a policy
    object.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The Amazon Resource Name (ARN) of
        the Directory in which the object will be created. For more
        information, see arns.
    :type SchemaFacets: list
    :param SchemaFacets: [REQUIRED] Schema facets to associate with the
        object; each element is a dict with 'SchemaArn' and 'FacetName'.
    :type ObjectAttributeList: list
    :param ObjectAttributeList: Attribute key/value pairs to set on the
        object; each element has a 'Key' ('SchemaArn', 'FacetName',
        'Name') and a typed 'Value' (StringValue, BinaryValue,
        BooleanValue, NumberValue, or DatetimeValue).
    :type ParentReference: dict
    :param ParentReference: If specified, the parent reference
        ({'Selector': ...}) to which this object will be attached. The
        selector is an $ObjectIdentifier, a /path, or a
        #BatchReference.
    :type LinkName: string
    :param LinkName: The name of the link used to attach this object to
        its parent.
    :rtype: dict
    :return: {'ObjectIdentifier': 'string'}
    """
    return None
def create_schema(Name=None):
    """Create a new schema in a development state.

    A schema can exist in three phases (development, published, and
    applied).

    See also: AWS API Documentation.

    :type Name: string
    :param Name: [REQUIRED] The name associated with the schema, unique
        to each account within each region.
    :rtype: dict
    :return: {'SchemaArn': 'string'}
    """
    return None
def create_typed_link_facet(SchemaArn=None, Facet=None):
    """Create a TypedLinkFacet. For more information, see Typed link.

    See also: AWS API Documentation.

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The Amazon Resource Name (ARN)
        associated with the schema. For more information, see arns.
    :type Facet: dict
    :param Facet: [REQUIRED] The facet structure for the typed link
        facet, with keys:

        * 'Name' (required) -- the unique name of the typed link facet.
        * 'Attributes' (required) -- an ordered list of typed link
          attribute definitions; each has a required 'Name', 'Type'
          ('STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME'), and
          'RequiredBehavior' ('REQUIRED_ALWAYS'|'NOT_REQUIRED'), plus
          optional 'DefaultValue', 'IsImmutable', and validation
          'Rules'.
        * 'IdentityAttributeOrder' (required) -- a list of attribute
          names defining the order used when filtering typed links;
          inexact ranges must come last and unspecified attributes
          match their entire range.
    :rtype: dict
    :return: {}
    """
    return None
def delete_directory(DirectoryArn=None):
    """Delete a directory.

    Only disabled directories can be deleted, and deletion cannot be
    undone — exercise extreme caution.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory to delete.
    :rtype: dict
    :return: {'DirectoryArn': 'string'}
    """
    return None
def delete_facet(SchemaArn=None, Name=None):
    """Delete a given Facet.

    All attributes and Rules associated with the facet are deleted.
    Only development schema facets may be deleted.

    See also: AWS API Documentation.

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The Amazon Resource Name (ARN)
        associated with the Facet. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED] The name of the facet to delete.
    :rtype: dict
    :return: {}
    """
    return None
def delete_object(DirectoryArn=None, ObjectReference=None):
    """Delete an object and its associated attributes.

    Only objects with no children and no parents can be deleted.

    See also: AWS API Documentation.

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The Amazon Resource Name (ARN) of
        the Directory where the object resides. For more information,
        see arns.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED] A reference ({'Selector': ...})
        identifying the object. The selector is an $ObjectIdentifier, a
        path such as /some/path, or a batch reference such as
        #SomeBatchReference.
    :rtype: dict
    :return: {}
    """
    return None
def delete_schema(SchemaArn=None):
    """Delete a given schema.

    Only schemas in a development or published state can be deleted.

    See also: AWS API Documentation.

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        development schema. For more information, see arns.
    :rtype: dict
    :return: {'SchemaArn': 'string'}
    """
    return None
def delete_typed_link_facet(SchemaArn=None, Name=None):
    """Delete a TypedLinkFacet. For more information, see Typed link.

    See also: AWS API Documentation.

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The Amazon Resource Name (ARN)
        associated with the schema. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED] The unique name of the typed link facet.
    :rtype: dict
    :return: {}
    """
    return None
def detach_from_index(DirectoryArn=None, IndexReference=None, TargetReference=None):
    """
    Detach the specified object from the specified index.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory the index
        and object exist in.
    :type IndexReference: dict
    :param IndexReference: [REQUIRED] A reference to the index object,
        of the form ``{'Selector': 'string'}``. The selector identifies
        the object by object identifier ($ObjectIdentifier), by path
        (/some/path), or by batch reference (#SomeBatchReference).
    :type TargetReference: dict
    :param TargetReference: [REQUIRED] A reference to the object being
        detached from the index; accepts the same selector forms as
        ``IndexReference``.
    :rtype: dict
    :return: {'DetachedObjectIdentifier': 'string'}
    """
    pass
def detach_object(DirectoryArn=None, ParentReference=None, LinkName=None):
    """
    Detach a given object from its parent object. The object to detach
    is specified by the link name under the parent.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN associated with the
        Directory where objects reside. For more information, see arns.
    :type ParentReference: dict
    :param ParentReference: [REQUIRED] The parent reference from which
        the object with the specified link name is detached, of the form
        ``{'Selector': 'string'}``. The selector may be an object
        identifier ($ObjectIdentifier), a path (/some/path), or a batch
        reference (#SomeBatchReference).
    :type LinkName: string
    :param LinkName: [REQUIRED] The link name associated with the object
        that needs to be detached.
    :rtype: dict
    :return: {'DetachedObjectIdentifier': 'string'}
    """
    pass
def detach_policy(DirectoryArn=None, PolicyReference=None, ObjectReference=None):
    """
    Detach a policy from an object.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN associated with the
        Directory where both objects reside. For more information, see
        arns.
    :type PolicyReference: dict
    :param PolicyReference: [REQUIRED] Reference that identifies the
        policy object, of the form ``{'Selector': 'string'}``. The
        selector may be an object identifier ($ObjectIdentifier), a path
        (/some/path), or a batch reference (#SomeBatchReference).
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED] Reference that identifies the
        object whose policy object will be detached; accepts the same
        selector forms as ``PolicyReference``.
    :rtype: dict
    :return: {}
    """
    pass
def detach_typed_link(DirectoryArn=None, TypedLinkSpecifier=None):
    """
    Detach a typed link from a specified source and target object. For
    more information, see Typed link.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory where you
        want to detach the typed link.
    :type TypedLinkSpecifier: dict
    :param TypedLinkSpecifier: [REQUIRED] The typed link specifier,
        with the following structure:

        - ``TypedLinkFacet`` (dict, REQUIRED): the facet associated with
          the typed link; contains ``SchemaArn`` (string, REQUIRED) and
          ``TypedLinkName`` (string, REQUIRED).
        - ``SourceObjectReference`` (dict, REQUIRED): the source object
          the typed link attaches to, ``{'Selector': 'string'}``. The
          selector may be an object identifier ($ObjectIdentifier), a
          path (/some/path), or a batch reference (#SomeBatchReference).
        - ``TargetObjectReference`` (dict, REQUIRED): the target object,
          same selector forms.
        - ``IdentityAttributeValues`` (list, REQUIRED): attribute
          name/value pairs for the typed link, each of the form
          ``{'AttributeName': 'string', 'Value': {...}}`` where the
          value dict may contain ``StringValue``, ``BinaryValue``,
          ``BooleanValue``, ``NumberValue`` (string), or
          ``DatetimeValue``.
    """
    pass
def disable_directory(DirectoryArn=None):
    """
    Disable the specified directory. A disabled directory cannot be read
    from or written to; only enabled directories can be disabled, and
    disabled directories may be re-enabled later.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory to disable.
    :rtype: dict
    :return: {'DirectoryArn': 'string'}
    """
    pass
def enable_directory(DirectoryArn=None):
    """
    Enable the specified directory. Only disabled directories can be
    enabled; once enabled, the directory can be read from and written to.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory to enable.
    :rtype: dict
    :return: {'DirectoryArn': 'string'}
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned URL given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned URL remains
        valid. By default it expires in one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the method is whatever the method's model uses.
    """
    pass
def get_directory(DirectoryArn=None):
    """
    Retrieve metadata about a directory.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory.
    :rtype: dict
    :return: {
        'Directory': {
            'Name': 'string',
            'DirectoryArn': 'string',
            'State': 'ENABLED'|'DISABLED'|'DELETED',
            'CreationDateTime': datetime(2015, 1, 1)
        }
    }
    """
    pass
def get_facet(SchemaArn=None, Name=None):
    """
    Get details of a Facet, such as its name, attributes, Rules, or
    ObjectType. Works on all kinds of schema facets: published,
    development, or applied.

    See also: AWS API Documentation

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The ARN that is associated with the
        Facet. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED] The name of the facet to retrieve.
    :rtype: dict
    :return: {
        'Facet': {
            'Name': 'string',
            'ObjectType': 'NODE'|'LEAF_NODE'|'POLICY'|'INDEX'
        }
    }
    """
    pass
def get_object_information(DirectoryArn=None, ObjectReference=None, ConsistencyLevel=None):
    """
    Retrieve metadata about an object.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory being
        retrieved.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED] A reference to the object, of the
        form ``{'Selector': 'string'}``. The selector may be an object
        identifier ($ObjectIdentifier), a path (/some/path), or a batch
        reference (#SomeBatchReference).
    :type ConsistencyLevel: string
    :param ConsistencyLevel: The consistency level at which to retrieve
        the object information ('SERIALIZABLE'|'EVENTUAL').
    :rtype: dict
    :return: {
        'SchemaFacets': [
            {'SchemaArn': 'string', 'FacetName': 'string'},
        ],
        'ObjectIdentifier': 'string'
    }
    """
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method
        name on the client. For example, if the method name is
        ``create_foo`` and you would normally invoke it as
        ``client.create_foo(**kwargs)``, then — provided the operation
        is pageable — you can use ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_schema_as_json(SchemaArn=None):
    """
    Retrieve a JSON representation of the schema. See JSON Schema Format
    for more information.

    See also: AWS API Documentation

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The ARN of the schema to retrieve.
    :rtype: dict
    :return: {
        'Name': 'string',
        'Document': 'string'
    }
    """
    pass
def get_typed_link_facet_information(SchemaArn=None, Name=None):
    """
    Return the identity attribute order for a specific TypedLinkFacet.
    For more information, see Typed link.

    See also: AWS API Documentation

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The ARN that is associated with the
        schema. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED] The unique name of the typed link facet.
    :rtype: dict
    :return: {
        'IdentityAttributeOrder': [
            'string',
        ]
    }
    """
    pass
def get_waiter():
    """
    Stub with no documented behavior.

    NOTE(review): the generated docstring is empty; the real botocore
    client method returns a waiter object and takes a waiter name —
    TODO confirm against the service model.
    """
    pass
def list_applied_schema_arns(DirectoryArn=None, NextToken=None, MaxResults=None):
    """
    List the schemas applied to a directory.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory you are
        listing.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
        'SchemaArns': [
            'string',
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_attached_indices(DirectoryArn=None, TargetReference=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    List the indices attached to an object.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory.
    :type TargetReference: dict
    :param TargetReference: [REQUIRED] A reference to the object that
        has indices attached, of the form ``{'Selector': 'string'}``.
        The selector may be an object identifier ($ObjectIdentifier), a
        path (/some/path), or a batch reference (#SomeBatchReference).
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: The consistency level to use for this
        operation ('SERIALIZABLE'|'EVENTUAL').
    :rtype: dict
    :return: {
        'IndexAttachments': [
            {
                'IndexedAttributes': [
                    {
                        'Key': {
                            'SchemaArn': 'string',
                            'FacetName': 'string',
                            'Name': 'string'
                        },
                        'Value': {
                            'StringValue': 'string',
                            'BinaryValue': b'bytes',
                            'BooleanValue': True|False,
                            'NumberValue': 'string',
                            'DatetimeValue': datetime(2015, 1, 1)
                        }
                    },
                ],
                'ObjectIdentifier': 'string'
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_development_schema_arns(NextToken=None, MaxResults=None):
    """
    Retrieve the Amazon Resource Name (ARN) of each schema in the
    development state.

    See also: AWS API Documentation

    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
        'SchemaArns': [
            'string',
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_directories(NextToken=None, MaxResults=None, state=None):
    """
    List the directories created within an account.

    See also: AWS API Documentation

    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :type state: string
    :param state: The state of the directories to list; one of
        'ENABLED', 'DISABLED', or 'DELETED'.
    :rtype: dict
    :return: {
        'Directories': [
            {
                'Name': 'string',
                'DirectoryArn': 'string',
                'State': 'ENABLED'|'DISABLED'|'DELETED',
                'CreationDateTime': datetime(2015, 1, 1)
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_facet_attributes(SchemaArn=None, Name=None, NextToken=None, MaxResults=None):
    """
    Retrieve the attributes attached to a facet.

    See also: AWS API Documentation

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The ARN of the schema where the facet
        resides.
    :type Name: string
    :param Name: [REQUIRED] The name of the facet whose attributes will
        be retrieved.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
        'Attributes': [
            {
                'Name': 'string',
                'AttributeDefinition': {
                    'Type': 'STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME',
                    'DefaultValue': {
                        'StringValue': 'string',
                        'BinaryValue': b'bytes',
                        'BooleanValue': True|False,
                        'NumberValue': 'string',
                        'DatetimeValue': datetime(2015, 1, 1)
                    },
                    'IsImmutable': True|False,
                    'Rules': {
                        'string': {
                            'Type': 'BINARY_LENGTH'|'NUMBER_COMPARISON'|'STRING_FROM_SET'|'STRING_LENGTH',
                            'Parameters': {
                                'string': 'string'
                            }
                        }
                    }
                },
                'AttributeReference': {
                    'TargetFacetName': 'string',
                    'TargetAttributeName': 'string'
                },
                'RequiredBehavior': 'REQUIRED_ALWAYS'|'NOT_REQUIRED'
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_facet_names(SchemaArn=None, NextToken=None, MaxResults=None):
    """
    Retrieve the names of the facets that exist in a schema.

    See also: AWS API Documentation

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED] The Amazon Resource Name (ARN) to
        retrieve facet names from.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
        'FacetNames': [
            'string',
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_incoming_typed_links(DirectoryArn=None, ObjectReference=None, FilterAttributeRanges=None, FilterTypedLink=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    Return a paginated list of all incoming TypedLinkSpecifier
    information for an object, with optional filtering by typed link
    facet and identity attributes. For more information, see Typed link.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory where you
        want to list the typed links.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED] Reference identifying the object
        whose attributes will be listed, of the form
        ``{'Selector': 'string'}``. The selector may be an object
        identifier ($ObjectIdentifier), a path (/some/path), or a batch
        reference (#SomeBatchReference).
    :type FilterAttributeRanges: list
    :param FilterAttributeRanges: Range filters for multiple attributes.
        Any inexact ranges must be specified at the end; attributes
        without a specified range are presumed to match the entire
        range. Each element has the shape::

            {
                'AttributeName': 'string',
                'Range': {
                    'StartMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',  # REQUIRED
                    'StartValue': {value dict},
                    'EndMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',    # REQUIRED
                    'EndValue': {value dict}
                }
            }

        where a value dict may contain ``StringValue``, ``BinaryValue``,
        ``BooleanValue``, ``NumberValue`` (string), or ``DatetimeValue``.
    :type FilterTypedLink: dict
    :param FilterTypedLink: Filters are interpreted in the order of the
        attributes on the typed link facet, not the order supplied to
        API calls. Contains ``SchemaArn`` (string, REQUIRED) and
        ``TypedLinkName`` (string, REQUIRED).
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: The consistency level to execute the
        request at ('SERIALIZABLE'|'EVENTUAL').
    :rtype: dict
    :return: {
        'LinkSpecifiers': [
            {
                'TypedLinkFacet': {
                    'SchemaArn': 'string',
                    'TypedLinkName': 'string'
                },
                'SourceObjectReference': {
                    'Selector': 'string'
                },
                'TargetObjectReference': {
                    'Selector': 'string'
                },
                'IdentityAttributeValues': [
                    {
                        'AttributeName': 'string',
                        'Value': {
                            'StringValue': 'string',
                            'BinaryValue': b'bytes',
                            'BooleanValue': True|False,
                            'NumberValue': 'string',
                            'DatetimeValue': datetime(2015, 1, 1)
                        }
                    },
                ]
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_index(DirectoryArn=None, RangesOnIndexedValues=None, IndexReference=None, MaxResults=None, NextToken=None, ConsistencyLevel=None):
    """
    List the objects attached to the specified index.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN of the directory that the
        index exists in.
    :type RangesOnIndexedValues: list
    :param RangesOnIndexedValues: The ranges of indexed values that you
        want to query. Each element has the shape::

            {
                'AttributeKey': {
                    'SchemaArn': 'string',   # REQUIRED: ARN of the schema containing the facet and attribute
                    'FacetName': 'string',   # REQUIRED: facet the attribute exists within
                    'Name': 'string'         # REQUIRED: name of the attribute
                },
                'Range': {
                    'StartMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',  # REQUIRED
                    'StartValue': {value dict},
                    'EndMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',    # REQUIRED
                    'EndValue': {value dict}
                }
            }

        where a value dict may contain ``StringValue``, ``BinaryValue``,
        ``BooleanValue``, ``NumberValue`` (string), or ``DatetimeValue``.
    :type IndexReference: dict
    :param IndexReference: [REQUIRED] The reference to the index to
        list, of the form ``{'Selector': 'string'}``. The selector may
        be an object identifier ($ObjectIdentifier), a path
        (/some/path), or a batch reference (#SomeBatchReference).
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve from
        the index.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: The consistency level to execute the
        request at ('SERIALIZABLE'|'EVENTUAL').
    :rtype: dict
    :return: {
        'IndexAttachments': [
            {
                'IndexedAttributes': [
                    {
                        'Key': {
                            'SchemaArn': 'string',
                            'FacetName': 'string',
                            'Name': 'string'
                        },
                        'Value': {
                            'StringValue': 'string',
                            'BinaryValue': b'bytes',
                            'BooleanValue': True|False,
                            'NumberValue': 'string',
                            'DatetimeValue': datetime(2015, 1, 1)
                        }
                    },
                ],
                'ObjectIdentifier': 'string'
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_object_attributes(DirectoryArn=None, ObjectReference=None, NextToken=None, MaxResults=None, ConsistencyLevel=None, FacetFilter=None):
    """
    List all attributes that are associated with an object.

    See also: AWS API Documentation

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED] The ARN associated with the
        Directory where the object resides. For more information, see
        arns.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED] The reference identifying the
        object whose attributes will be listed, of the form
        ``{'Selector': 'string'}``. The selector may be an object
        identifier ($ObjectIdentifier), a path (/some/path), or a batch
        reference (#SomeBatchReference).
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of items retrieved in a single
        call; this is an approximate number.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: Represents the manner and timing in which a
        successful write or update of an object is reflected in a
        subsequent read of that same object.
    :type FacetFilter: dict
    :param FacetFilter: Filters the listed attributes to those
        associated with a certain facet; contains ``SchemaArn`` (the ARN
        of the schema that contains the facet) and ``FacetName``.
    :rtype: dict
    :return: {
        'Attributes': [
            {
                'Key': {
                    'SchemaArn': 'string',
                    'FacetName': 'string',
                    'Name': 'string'
                },
                'Value': {
                    'StringValue': 'string',
                    'BinaryValue': b'bytes',
                    'BooleanValue': True|False,
                    'NumberValue': 'string',
                    'DatetimeValue': datetime(2015, 1, 1)
                }
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
def list_object_children(DirectoryArn=None, ObjectReference=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    Returns a paginated list of child objects that are associated with a given object.
    See also: AWS API Documentation
    :example: response = client.list_object_children(
    DirectoryArn='string',
    ObjectReference={
    'Selector': 'string'
    },
    NextToken='string',
    MaxResults=123,
    ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
    The reference that identifies the object for which child objects are being listed.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of items to be retrieved in a single call. This is an approximate number.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.
    :rtype: dict
    :return: {
    'Children': {
    'string': 'string'
    },
    'NextToken': 'string'
    }
    :returns:
    (string) --
    (string) --
    """
    pass
def list_object_parent_paths(DirectoryArn=None, ObjectReference=None, NextToken=None, MaxResults=None):
    """
    Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects. For more information about objects, see Directory Structure.
    Use this API to evaluate all parents for an object. The call returns all objects from the root of the directory up to the requested object. The API returns the number of paths based on user-defined MaxResults, in case there are multiple paths to the parent. The order of the paths and nodes returned is consistent among multiple API calls unless the objects are deleted or moved. Paths not leading to the directory root are ignored from the target object.
    See also: AWS API Documentation
    :example: response = client.list_object_parent_paths(
    DirectoryArn='string',
    ObjectReference={
    'Selector': 'string'
    },
    NextToken='string',
    MaxResults=123
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The ARN of the directory to which the parent path applies.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
    The reference that identifies the object whose parent paths are listed.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of items to be retrieved in a single call. This is an approximate number.
    :rtype: dict
    :return: {
    'PathToObjectIdentifiersList': [
    {
    'Path': 'string',
    'ObjectIdentifiers': [
    'string',
    ]
    },
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    pass
def list_object_parents(DirectoryArn=None, ObjectReference=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    Lists parent objects that are associated with a given object in pagination fashion.
    See also: AWS API Documentation
    :example: response = client.list_object_parents(
    DirectoryArn='string',
    ObjectReference={
    'Selector': 'string'
    },
    NextToken='string',
    MaxResults=123,
    ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
    The reference that identifies the object for which parent objects are being listed.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of items to be retrieved in a single call. This is an approximate number.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.
    :rtype: dict
    :return: {
    'Parents': {
    'string': 'string'
    },
    'NextToken': 'string'
    }
    :returns:
    (string) --
    (string) --
    """
    pass
def list_object_policies(DirectoryArn=None, ObjectReference=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    Returns policies attached to an object in pagination fashion.
    See also: AWS API Documentation
    :example: response = client.list_object_policies(
    DirectoryArn='string',
    ObjectReference={
    'Selector': 'string'
    },
    NextToken='string',
    MaxResults=123,
    ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Directory where objects reside. For more information, see arns.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
    Reference that identifies the object for which policies will be listed.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of items to be retrieved in a single call. This is an approximate number.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.
    :rtype: dict
    :return: {
    'AttachedPolicyIds': [
    'string',
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    pass
def list_outgoing_typed_links(DirectoryArn=None, ObjectReference=None, FilterAttributeRanges=None, FilterTypedLink=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object. It also supports filtering by typed link facet and identity attributes. For more information, see Typed link.
    See also: AWS API Documentation
    :example: response = client.list_outgoing_typed_links(
    DirectoryArn='string',
    ObjectReference={
    'Selector': 'string'
    },
    FilterAttributeRanges=[
    {
    'AttributeName': 'string',
    'Range': {
    'StartMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',
    'StartValue': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    },
    'EndMode': 'FIRST'|'LAST'|'LAST_BEFORE_MISSING_VALUES'|'INCLUSIVE'|'EXCLUSIVE',
    'EndValue': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    }
    }
    },
    ],
    FilterTypedLink={
    'SchemaArn': 'string',
    'TypedLinkName': 'string'
    },
    NextToken='string',
    MaxResults=123,
    ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The Amazon Resource Name (ARN) of the directory where you want to list the typed links.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
    A reference that identifies the object whose attributes will be listed.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type FilterAttributeRanges: list
    :param FilterAttributeRanges: Provides range filters for multiple attributes. When providing ranges to typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range.
    (dict) --Identifies the range of attributes that are used by a specified filter.
    AttributeName (string) --The unique name of the typed link attribute.
    Range (dict) -- [REQUIRED]The range of attribute values that are being selected.
    StartMode (string) -- [REQUIRED]The inclusive or exclusive range start.
    StartValue (dict) --The value to start the range at.
    StringValue (string) --A string data value.
    BinaryValue (bytes) --A binary data value.
    BooleanValue (boolean) --A Boolean data value.
    NumberValue (string) --A number data value.
    DatetimeValue (datetime) --A date and time value.
    EndMode (string) -- [REQUIRED]The inclusive or exclusive range end.
    EndValue (dict) --The attribute value to terminate the range at.
    StringValue (string) --A string data value.
    BinaryValue (bytes) --A binary data value.
    BooleanValue (boolean) --A Boolean data value.
    NumberValue (string) --A number data value.
    DatetimeValue (datetime) --A date and time value.
    :type FilterTypedLink: dict
    :param FilterTypedLink: Filters are interpreted in the order of the attributes defined on the typed link facet, not the order they are supplied to any API calls.
    SchemaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.
    TypedLinkName (string) -- [REQUIRED]The unique name of the typed link facet.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: The consistency level to execute the request at.
    :rtype: dict
    :return: {
    'TypedLinkSpecifiers': [
    {
    'TypedLinkFacet': {
    'SchemaArn': 'string',
    'TypedLinkName': 'string'
    },
    'SourceObjectReference': {
    'Selector': 'string'
    },
    'TargetObjectReference': {
    'Selector': 'string'
    },
    'IdentityAttributeValues': [
    {
    'AttributeName': 'string',
    'Value': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    }
    },
    ]
    },
    ],
    'NextToken': 'string'
    }
    :returns:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    """
    pass
def list_policy_attachments(DirectoryArn=None, PolicyReference=None, NextToken=None, MaxResults=None, ConsistencyLevel=None):
    """
    Returns all of the ObjectIdentifiers to which a given policy is attached.
    See also: AWS API Documentation
    :example: response = client.list_policy_attachments(
    DirectoryArn='string',
    PolicyReference={
    'Selector': 'string'
    },
    NextToken='string',
    MaxResults=123,
    ConsistencyLevel='SERIALIZABLE'|'EVENTUAL'
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Directory where objects reside. For more information, see arns.
    :type PolicyReference: dict
    :param PolicyReference: [REQUIRED]
    The reference that identifies the policy object.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of items to be retrieved in a single call. This is an approximate number.
    :type ConsistencyLevel: string
    :param ConsistencyLevel: Represents the manner and timing in which the successful write or update of an object is reflected in a subsequent read operation of that same object.
    :rtype: dict
    :return: {
    'ObjectIdentifiers': [
    'string',
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    pass
def list_published_schema_arns(NextToken=None, MaxResults=None):
    """
    Retrieves each published schema Amazon Resource Name (ARN).
    See also: AWS API Documentation
    :example: response = client.list_published_schema_arns(
    NextToken='string',
    MaxResults=123
    )
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
    'SchemaArns': [
    'string',
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) -- The ARN of a published schema.
    """
    pass
def list_tags_for_resource(ResourceArn=None, NextToken=None, MaxResults=None):
    """
    Returns tags for a resource. Tagging is currently supported only for directories with a limit of 50 tags per directory. All 50 tags are returned for a given directory with this API call.
    See also: AWS API Documentation
    :example: response = client.list_tags_for_resource(
    ResourceArn='string',
    NextToken='string',
    MaxResults=123
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]
    The Amazon Resource Name (ARN) of the resource. Tagging is only supported for directories.
    :type NextToken: string
    :param NextToken: The pagination token. This is for future use. Currently pagination is not supported for tagging.
    :type MaxResults: integer
    :param MaxResults: The MaxResults parameter sets the maximum number of results returned in a single page. This is for future use and is not supported currently.
    :rtype: dict
    :return: {
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'NextToken': 'string'
    }
    :returns:
    Key (string) -- The key that is associated with the tag.
    Value (string) -- The value that is associated with the tag.
    """
    pass
def list_typed_link_facet_attributes(SchemaArn=None, Name=None, NextToken=None, MaxResults=None):
    """
    Returns a paginated list of all attribute definitions for a particular TypedLinkFacet. For more information, see Typed link.
    See also: AWS API Documentation
    :example: response = client.list_typed_link_facet_attributes(
    SchemaArn='string',
    Name='string',
    NextToken='string',
    MaxResults=123
    )
    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED]
    The unique name of the typed link facet.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
    'Attributes': [
    {
    'Name': 'string',
    'Type': 'STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME',
    'DefaultValue': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    },
    'IsImmutable': True|False,
    'Rules': {
    'string': {
    'Type': 'BINARY_LENGTH'|'NUMBER_COMPARISON'|'STRING_FROM_SET'|'STRING_LENGTH',
    'Parameters': {
    'string': 'string'
    }
    }
    },
    'RequiredBehavior': 'REQUIRED_ALWAYS'|'NOT_REQUIRED'
    },
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) --
    (string) --
    """
    pass
def list_typed_link_facet_names(SchemaArn=None, NextToken=None, MaxResults=None):
    """
    Returns a paginated list of TypedLink facet names for a particular schema. For more information, see Typed link.
    See also: AWS API Documentation
    :example: response = client.list_typed_link_facet_names(
    SchemaArn='string',
    NextToken='string',
    MaxResults=123
    )
    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns.
    :type NextToken: string
    :param NextToken: The pagination token.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to retrieve.
    :rtype: dict
    :return: {
    'FacetNames': [
    'string',
    ],
    'NextToken': 'string'
    }
    :returns:
    (string) -- The name of a typed link facet.
    """
    pass
def lookup_policy(DirectoryArn=None, ObjectReference=None, NextToken=None, MaxResults=None):
    """
    Lists all policies from the root of the Directory to the object specified. If there are no policies present, an empty list is returned. If policies are present, and if some objects don't have the policies attached, it returns the ObjectIdentifier for such objects. If policies are present, it returns ObjectIdentifier, policyId, and policyType. Paths that don't lead to the root from the target object are ignored. For more information, see Policies.
    See also: AWS API Documentation
    :example: response = client.lookup_policy(
    DirectoryArn='string',
    ObjectReference={
    'Selector': 'string'
    },
    NextToken='string',
    MaxResults=123
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Directory. For more information, see arns.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
    Reference that identifies the object whose policies will be looked up.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :type NextToken: string
    :param NextToken: The token to request the next page of results.
    :type MaxResults: integer
    :param MaxResults: The maximum number of items to be retrieved in a single call. This is an approximate number.
    :rtype: dict
    :return: {
    'PolicyToPathList': [
    {
    'Path': 'string',
    'Policies': [
    {
    'PolicyId': 'string',
    'ObjectIdentifier': 'string',
    'PolicyType': 'string'
    },
    ]
    },
    ],
    'NextToken': 'string'
    }
    """
    pass
def publish_schema(DevelopmentSchemaArn=None, Version=None, Name=None):
    """
    Publishes a development schema with a version. If description and attributes are specified, PublishSchema overrides the development schema description and attributes. If not, the development schema description and attributes are used.
    See also: AWS API Documentation
    :example: response = client.publish_schema(
    DevelopmentSchemaArn='string',
    Version='string',
    Name='string'
    )
    :type DevelopmentSchemaArn: string
    :param DevelopmentSchemaArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the development schema. For more information, see arns.
    :type Version: string
    :param Version: [REQUIRED]
    The version under which the schema will be published.
    :type Name: string
    :param Name: The new name under which the schema will be published. If this is not provided, the development schema is considered.
    :rtype: dict
    :return: {
    'PublishedSchemaArn': 'string'
    }
    """
    pass
def put_schema_from_json(SchemaArn=None, Document=None):
    """
    Allows a schema to be updated using JSON upload. Only available for development schemas. See JSON Schema Format for more information.
    See also: AWS API Documentation
    :example: response = client.put_schema_from_json(
    SchemaArn='string',
    Document='string'
    )
    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
    The ARN of the schema to update.
    :type Document: string
    :param Document: [REQUIRED]
    The replacement JSON schema.
    :rtype: dict
    :return: {
    'Arn': 'string'
    }
    :returns:
    Arn (string) -- The ARN of the schema that was updated.
    """
    pass
def remove_facet_from_object(DirectoryArn=None, SchemaFacet=None, ObjectReference=None):
    """
    Removes the specified facet from the specified object.
    See also: AWS API Documentation
    :example: response = client.remove_facet_from_object(
    DirectoryArn='string',
    SchemaFacet={
    'SchemaArn': 'string',
    'FacetName': 'string'
    },
    ObjectReference={
    'Selector': 'string'
    }
    )
    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
    The ARN of the directory in which the object resides.
    :type SchemaFacet: dict
    :param SchemaFacet: [REQUIRED]
    The facet to remove.
    SchemaArn (string) --The ARN of the schema that contains the facet.
    FacetName (string) --The name of the facet.
    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
    A reference to the object to remove the facet from.
    Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects. You can identify an object in one of the following ways:
    $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier.
    /some/path - Identifies the object based on path
    #SomeBatchReference - Identifies the object in a batch call
    :rtype: dict
    :return: {}
    :returns:
    (dict) -- An empty response structure.
    """
    pass
def tag_resource(ResourceArn=None, Tags=None):
    """
    An API operation for adding tags to a resource.
    See also: AWS API Documentation
    :example: response = client.tag_resource(
    ResourceArn='string',
    Tags=[
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]
    The Amazon Resource Name (ARN) of the resource. Tagging is only supported for directories.
    :type Tags: list
    :param Tags: [REQUIRED]
    A list of tag key-value pairs.
    (dict) --The tag structure that contains a tag key and value.
    Key (string) --The key that is associated with the tag.
    Value (string) --The value that is associated with the tag.
    :rtype: dict
    :return: {}
    :returns:
    (dict) -- An empty response structure.
    """
    pass
def untag_resource(ResourceArn=None, TagKeys=None):
    """
    An API operation for removing tags from a resource.
    See also: AWS API Documentation
    :example: response = client.untag_resource(
    ResourceArn='string',
    TagKeys=[
    'string',
    ]
    )
    :type ResourceArn: string
    :param ResourceArn: [REQUIRED]
    The Amazon Resource Name (ARN) of the resource. Tagging is only supported for directories.
    :type TagKeys: list
    :param TagKeys: [REQUIRED]
    Keys of the tag that need to be removed from the resource.
    (string) --
    :rtype: dict
    :return: {}
    :returns:
    (dict) -- An empty response structure.
    """
    pass
def update_facet(SchemaArn=None, Name=None, AttributeUpdates=None, ObjectType=None):
    """
    Does the following: adds new attributes, rules, or object types; updates existing attributes, rules, or object types; or deletes existing attributes, rules, or object types (see the AttributeUpdates Action values and the ObjectType parameter below).
    See also: AWS API Documentation
    :example: response = client.update_facet(
    SchemaArn='string',
    Name='string',
    AttributeUpdates=[
    {
    'Attribute': {
    'Name': 'string',
    'AttributeDefinition': {
    'Type': 'STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME',
    'DefaultValue': {
    'StringValue': 'string',
    'BinaryValue': b'bytes',
    'BooleanValue': True|False,
    'NumberValue': 'string',
    'DatetimeValue': datetime(2015, 1, 1)
    },
    'IsImmutable': True|False,
    'Rules': {
    'string': {
    'Type': 'BINARY_LENGTH'|'NUMBER_COMPARISON'|'STRING_FROM_SET'|'STRING_LENGTH',
    'Parameters': {
    'string': 'string'
    }
    }
    }
    },
    'AttributeReference': {
    'TargetFacetName': 'string',
    'TargetAttributeName': 'string'
    },
    'RequiredBehavior': 'REQUIRED_ALWAYS'|'NOT_REQUIRED'
    },
    'Action': 'CREATE_OR_UPDATE'|'DELETE'
    },
    ],
    ObjectType='NODE'|'LEAF_NODE'|'POLICY'|'INDEX'
    )
    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Facet. For more information, see arns.
    :type Name: string
    :param Name: [REQUIRED]
    The name of the facet.
    :type AttributeUpdates: list
    :param AttributeUpdates: List of attributes that need to be updated in a given schema Facet. Each attribute is followed by AttributeAction, which specifies the type of update operation to perform.
    (dict) --A structure that contains information used to update an attribute.
    Attribute (dict) --The attribute to update.
    Name (string) -- [REQUIRED]The name of the facet attribute.
    AttributeDefinition (dict) --A facet attribute consists of either a definition or a reference. This structure contains the attribute definition. See Attribute References for more information.
    Type (string) -- [REQUIRED]The type of the attribute.
    DefaultValue (dict) --The default value of the attribute (if configured).
    StringValue (string) --A string data value.
    BinaryValue (bytes) --A binary data value.
    BooleanValue (boolean) --A Boolean data value.
    NumberValue (string) --A number data value.
    DatetimeValue (datetime) --A date and time value.
    IsImmutable (boolean) --Whether the attribute is mutable or not.
    Rules (dict) --Validation rules attached to the attribute definition.
    (string) --
    (dict) --Contains an Amazon Resource Name (ARN) and parameters that are associated with the rule.
    Type (string) --The type of attribute validation rule.
    Parameters (dict) --The minimum and maximum parameters that are associated with the rule.
    (string) --
    (string) --
    AttributeReference (dict) --An attribute reference that is associated with the attribute. See Attribute References for more information.
    TargetFacetName (string) -- [REQUIRED]The target facet name that is associated with the facet reference. See Attribute References for more information.
    TargetAttributeName (string) -- [REQUIRED]The target attribute name that is associated with the facet reference. See Attribute References for more information.
    RequiredBehavior (string) --The required behavior of the FacetAttribute.
    Action (string) --The action to perform when updating the attribute.
    :type ObjectType: string
    :param ObjectType: The object type that is associated with the facet. See CreateFacetRequest$ObjectType for more details.
    :rtype: dict
    :return: {}
    :returns:
    SchemaArn (string) -- [REQUIRED]
    The Amazon Resource Name (ARN) that is associated with the Facet. For more information, see arns.
    Name (string) -- [REQUIRED]
    The name of the facet.
    AttributeUpdates (list) -- List of attributes that need to be updated in a given schema Facet. Each attribute is followed by AttributeAction, which specifies the type of update operation to perform.
    (dict) --A structure that contains information used to update an attribute.
    Attribute (dict) --The attribute to update.
    Name (string) -- [REQUIRED]The name of the facet attribute.
    AttributeDefinition (dict) --A facet attribute consists of either a definition or a reference. This structure contains the attribute definition. See Attribute References for more information.
    Type (string) -- [REQUIRED]The type of the attribute.
    DefaultValue (dict) --The default value of the attribute (if configured).
    StringValue (string) --A string data value.
    BinaryValue (bytes) --A binary data value.
    BooleanValue (boolean) --A Boolean data value.
    NumberValue (string) --A number data value.
    DatetimeValue (datetime) --A date and time value.
    IsImmutable (boolean) --Whether the attribute is mutable or not.
    Rules (dict) --Validation rules attached to the attribute definition.
    (string) --
    (dict) --Contains an Amazon Resource Name (ARN) and parameters that are associated with the rule.
    Type (string) --The type of attribute validation rule.
    Parameters (dict) --The minimum and maximum parameters that are associated with the rule.
    (string) --
    (string) --
    AttributeReference (dict) --An attribute reference that is associated with the attribute. See Attribute References for more information.
    TargetFacetName (string) -- [REQUIRED]The target facet name that is associated with the facet reference. See Attribute References for more information.
    TargetAttributeName (string) -- [REQUIRED]The target attribute name that is associated with the facet reference. See Attribute References for more information.
    RequiredBehavior (string) --The required behavior of the FacetAttribute.
    Action (string) --The action to perform when updating the attribute.
    ObjectType (string) -- The object type that is associated with the facet. See CreateFacetRequest$ObjectType for more details.
    """
    pass
def update_object_attributes(DirectoryArn=None, ObjectReference=None, AttributeUpdates=None):
    """
    Updates a given object's attributes.
    See also: AWS API Documentation

    :example: response = client.update_object_attributes(
        DirectoryArn='string',
        ObjectReference={
            'Selector': 'string'
        },
        AttributeUpdates=[
            {
                'ObjectAttributeKey': {
                    'SchemaArn': 'string',
                    'FacetName': 'string',
                    'Name': 'string'
                },
                'ObjectAttributeAction': {
                    'ObjectAttributeActionType': 'CREATE_OR_UPDATE'|'DELETE',
                    'ObjectAttributeUpdateValue': {
                        'StringValue': 'string',
                        'BinaryValue': b'bytes',
                        'BooleanValue': True|False,
                        'NumberValue': 'string',
                        'DatetimeValue': datetime(2015, 1, 1)
                    }
                }
            },
        ]
    )

    :type DirectoryArn: string
    :param DirectoryArn: [REQUIRED]
        The Amazon Resource Name (ARN) that is associated with the Directory where the object resides. For more information, see arns .

    :type ObjectReference: dict
    :param ObjectReference: [REQUIRED]
        The reference that identifies the object.
        Selector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Accessing Objects . You can identify an object in one of the following ways:
        $ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object s identifier is immutable and no two objects will ever share the same object identifier
        /some/path - Identifies the object based on path
        #SomeBatchReference - Identifies the object in a batch call

    :type AttributeUpdates: list
    :param AttributeUpdates: [REQUIRED]
        The attributes update structure.
        (dict) --Structure that contains attribute update information.
        ObjectAttributeKey (dict) --The key of the attribute being updated.
        SchemaArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the schema that contains the facet and attribute.
        FacetName (string) -- [REQUIRED]The name of the facet that the attribute exists within.
        Name (string) -- [REQUIRED]The name of the attribute.
        ObjectAttributeAction (dict) --The action to perform as part of the attribute update.
        ObjectAttributeActionType (string) --A type that can be either Update or Delete .
        ObjectAttributeUpdateValue (dict) --The value that you want to update to.
        StringValue (string) --A string data value.
        BinaryValue (bytes) --A binary data value.
        BooleanValue (boolean) --A Boolean data value.
        NumberValue (string) --A number data value.
        DatetimeValue (datetime) --A date and time value.

    :rtype: dict
    :return: {
        'ObjectIdentifier': 'string'
    }
    """
    # Documentation-only stub: the body is intentionally empty and no request
    # is issued here.
    pass
def update_schema(SchemaArn=None, Name=None):
    """
    Updates the schema name with a new name. Only development schema names can be updated.
    See also: AWS API Documentation

    :example: response = client.update_schema(
        SchemaArn='string',
        Name='string'
    )

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
        The Amazon Resource Name (ARN) of the development schema. For more information, see arns .

    :type Name: string
    :param Name: [REQUIRED]
        The name of the schema.

    :rtype: dict
    :return: {
        'SchemaArn': 'string'
    }
    """
    # Documentation-only stub: the body is intentionally empty and no request
    # is issued here.
    pass
def update_typed_link_facet(SchemaArn=None, Name=None, AttributeUpdates=None, IdentityAttributeOrder=None):
    """
    Updates a TypedLinkFacet . For more information, see Typed link .
    See also: AWS API Documentation

    :example: response = client.update_typed_link_facet(
        SchemaArn='string',
        Name='string',
        AttributeUpdates=[
            {
                'Attribute': {
                    'Name': 'string',
                    'Type': 'STRING'|'BINARY'|'BOOLEAN'|'NUMBER'|'DATETIME',
                    'DefaultValue': {
                        'StringValue': 'string',
                        'BinaryValue': b'bytes',
                        'BooleanValue': True|False,
                        'NumberValue': 'string',
                        'DatetimeValue': datetime(2015, 1, 1)
                    },
                    'IsImmutable': True|False,
                    'Rules': {
                        'string': {
                            'Type': 'BINARY_LENGTH'|'NUMBER_COMPARISON'|'STRING_FROM_SET'|'STRING_LENGTH',
                            'Parameters': {
                                'string': 'string'
                            }
                        }
                    },
                    'RequiredBehavior': 'REQUIRED_ALWAYS'|'NOT_REQUIRED'
                },
                'Action': 'CREATE_OR_UPDATE'|'DELETE'
            },
        ],
        IdentityAttributeOrder=[
            'string',
        ]
    )

    :type SchemaArn: string
    :param SchemaArn: [REQUIRED]
        The Amazon Resource Name (ARN) that is associated with the schema. For more information, see arns .

    :type Name: string
    :param Name: [REQUIRED]
        The unique name of the typed link facet.

    :type AttributeUpdates: list
    :param AttributeUpdates: [REQUIRED]
        Attributes update structure.
        (dict) --A typed link facet attribute update.
        Attribute (dict) -- [REQUIRED]The attribute to update.
        Name (string) -- [REQUIRED]The unique name of the typed link attribute.
        Type (string) -- [REQUIRED]The type of the attribute.
        DefaultValue (dict) --The default value of the attribute (if configured).
        StringValue (string) --A string data value.
        BinaryValue (bytes) --A binary data value.
        BooleanValue (boolean) --A Boolean data value.
        NumberValue (string) --A number data value.
        DatetimeValue (datetime) --A date and time value.
        IsImmutable (boolean) --Whether the attribute is mutable or not.
        Rules (dict) --Validation rules that are attached to the attribute definition.
        (string) --
        (dict) --Contains an Amazon Resource Name (ARN) and parameters that are associated with the rule.
        Type (string) --The type of attribute validation rule.
        Parameters (dict) --The minimum and maximum parameters that are associated with the rule.
        (string) --
        (string) --
        RequiredBehavior (string) -- [REQUIRED]The required behavior of the TypedLinkAttributeDefinition .
        Action (string) -- [REQUIRED]The action to perform when updating the attribute.

    :type IdentityAttributeOrder: list
    :param IdentityAttributeOrder: [REQUIRED]
        A range filter that you provide for multiple attributes. The ability to filter typed links considers the order that the attributes are defined on the typed link facet. When providing ranges to a typed link selection, any inexact ranges must be specified at the end. Any attributes that do not have a range specified are presumed to match the entire range. Filters are interpreted in the order of the attributes on the typed link facet, not the order in which they are supplied to any API calls.
        (string) --

    :rtype: dict
    :return: {}
    :returns:
        (dict) --
    """
    # Documentation-only stub: the body is intentionally empty and no request
    # is issued here.
    pass
| 42.435423
| 547
| 0.611136
| 17,237
| 158,369
| 5.596856
| 0.035679
| 0.012231
| 0.021457
| 0.013496
| 0.870409
| 0.848538
| 0.833321
| 0.820281
| 0.802971
| 0.782986
| 0
| 0.002342
| 0.320442
| 158,369
| 3,731
| 548
| 42.446797
| 0.894073
| 0.876333
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
2a5b5bfa1af91d08020455db80f4ecce29786656
| 3,713
|
py
|
Python
|
tests/test_algorithms.py
|
subhamgcon/fliscopt
|
8bd33b1313e12da4ce1a67d21662709c11e33d58
|
[
"MIT"
] | null | null | null |
tests/test_algorithms.py
|
subhamgcon/fliscopt
|
8bd33b1313e12da4ce1a67d21662709c11e33d58
|
[
"MIT"
] | null | null | null |
tests/test_algorithms.py
|
subhamgcon/fliscopt
|
8bd33b1313e12da4ce1a67d21662709c11e33d58
|
[
"MIT"
] | null | null | null |
# Make the package importable when the tests are run from a source checkout
# without an installed `fliscopt`: fall back to the repository root.
try:
    import fliscopt
except ImportError:
    # Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt,
    # SystemExit, and unrelated errors raised while importing the package.
    import sys
    sys.path.append("..")
import unittest
from fliscopt.rs import RandomSearch
from fliscopt.sa import SimulatedAnnealing
from fliscopt.ga import GA
from fliscopt.hc import HillClimb
from fliscopt.utils.util import read_file
from fliscopt.fitness import griewank,domain
class TestAlgorithms(unittest.TestCase):
    """Smoke tests for the optimisation algorithms.

    Every algorithm's ``run`` is expected to return a 5-tuple:
    ``(best_solution, best_cost, scores, nfe, seed)``.  The four tests
    previously duplicated the same six assertions verbatim; they now share
    ``_check_result`` so the contract is stated (and maintained) once.
    """

    def _check_result(self, res, soln_length=5):
        """Assert the common shape/non-None contract of a run() result."""
        self.assertIsNotNone(res[0], msg="Best sol returned None, expected output of type List")
        self.assertEqual(len(res[0]), soln_length,
                         msg="Best sol output length not matching length :{} of input soln. Refer fitness_fn soln length".format(soln_length))
        self.assertIsNotNone(res[1], msg="Best cost returned None, expected output of type float/int")
        self.assertIsNotNone(res[2], msg="Scores returned None, expected output of type List")
        self.assertIsNotNone(res[3], msg="Nfe returned None, expected output of type Int")
        self.assertIsNotNone(res[4], msg="Seed returned None, expected output of type float")

    def test_rs(self):
        # Tiny time budget: we only check the result contract, not quality.
        rs = RandomSearch(max_time=0.00001)
        res = rs.run(domain=domain['griewank'] * 5, fitness_function=griewank, seed=5)
        self._check_result(res)

    def test_sa(self):
        sa = SimulatedAnnealing(max_time=0.0003, temperature=50000.0, seed_init=False)
        res = sa.run(domain=domain['griewank'] * 5, fitness_function=griewank, seed=5)
        self._check_result(res)

    def test_ga(self):
        ga = GA(seed_init=False, search=False)
        res = ga.run(domain=domain['griewank'] * 5, fitness_function=griewank, seed=5)
        self._check_result(res)

    def test_hc(self):
        hc = HillClimb(seed_init=False, max_time=0.0000001)
        res = hc.run(domain=domain['griewank'] * 5, fitness_function=griewank, seed=5)
        self._check_result(res)
if __name__ == '__main__':
    # Presumably pre-loads the flight data that the fitness functions depend
    # on — confirm against fliscopt.utils.util.read_file.
    read_file('flights.txt')
    unittest.main()
| 57.123077
| 148
| 0.708322
| 541
| 3,713
| 4.809612
| 0.151571
| 0.146042
| 0.169101
| 0.199846
| 0.782859
| 0.782859
| 0.782859
| 0.782859
| 0.782859
| 0.782859
| 0
| 0.022244
| 0.176677
| 3,713
| 64
| 149
| 58.015625
| 0.828917
| 0
| 0
| 0.491228
| 0
| 0
| 0.387019
| 0
| 0
| 0
| 0
| 0
| 0.421053
| 1
| 0.070175
| false
| 0
| 0.157895
| 0
| 0.245614
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aab789c9110a03b7ab21f5e849da7371542063c2
| 19,554
|
py
|
Python
|
sdk/python/pulumi_azure/keyvault/key_vault.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/keyvault/key_vault.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/keyvault/key_vault.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class KeyVault(pulumi.CustomResource):
    access_policies: pulumi.Output[list]
    """
    A list of up to 16 objects describing access policies, as described below.
    * `application_id` (`str`) - The object ID of an Application in Azure Active Directory.
    * `certificate_permissions` (`list`) - List of certificate permissions, must be one or more from the following: `backup`, `create`, `delete`, `deleteissuers`, `get`, `getissuers`, `import`, `list`, `listissuers`, `managecontacts`, `manageissuers`, `purge`, `recover`, `restore`, `setissuers` and `update`.
    * `key_permissions` (`list`) - List of key permissions, must be one or more from the following: `backup`, `create`, `decrypt`, `delete`, `encrypt`, `get`, `import`, `list`, `purge`, `recover`, `restore`, `sign`, `unwrapKey`, `update`, `verify` and `wrapKey`.
    * `object_id` (`str`) - The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault. The object ID must be unique for the list of access policies.
    * `secret_permissions` (`list`) - List of secret permissions, must be one or more from the following: `backup`, `delete`, `get`, `list`, `purge`, `recover`, `restore` and `set`.
    * `storage_permissions` (`list`) - List of storage permissions, must be one or more from the following: `backup`, `delete`, `deletesas`, `get`, `getsas`, `list`, `listsas`, `purge`, `recover`, `regeneratekey`, `restore`, `set`, `setsas` and `update`.
    * `tenant_id` (`str`) - The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. Must match the `tenant_id` used above.
    """
    enabled_for_deployment: pulumi.Output[bool]
    """
    Boolean flag to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. Defaults to `false`.
    """
    enabled_for_disk_encryption: pulumi.Output[bool]
    """
    Boolean flag to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. Defaults to `false`.
    """
    enabled_for_template_deployment: pulumi.Output[bool]
    """
    Boolean flag to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault. Defaults to `false`.
    """
    location: pulumi.Output[str]
    """
    Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
    """
    name: pulumi.Output[str]
    """
    Specifies the name of the Key Vault. Changing this forces a new resource to be created.
    """
    network_acls: pulumi.Output[dict]
    """
    A `network_acls` block as defined below.
    * `bypass` (`str`) - Specifies which traffic can bypass the network rules. Possible values are `AzureServices` and `None`.
    * `default_action` (`str`) - The Default Action to use when no rules match from `ip_rules` / `virtual_network_subnet_ids`. Possible values are `Allow` and `Deny`.
    * `ip_rules` (`list`) - One or more IP Addresses, or CIDR Blocks which should be able to access the Key Vault.
    * `virtual_network_subnet_ids` (`list`) - One or more Subnet ID's which should be able to access this Key Vault.
    """
    purge_protection_enabled: pulumi.Output[bool]
    """
    Is Purge Protection enabled for this Key Vault? Defaults to `false`.
    """
    resource_group_name: pulumi.Output[str]
    """
    The name of the resource group in which to create the Key Vault. Changing this forces a new resource to be created.
    """
    sku_name: pulumi.Output[str]
    """
    The Name of the SKU used for this Key Vault. Possible values are `standard` and `premium`.
    """
    soft_delete_enabled: pulumi.Output[bool]
    """
    Should Soft Delete be enabled for this Key Vault? Defaults to `false`.
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the resource.
    """
    tenant_id: pulumi.Output[str]
    """
    The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
    """
    vault_uri: pulumi.Output[str]
    """
    The URI of the Key Vault, used for performing operations on keys and secrets.
    """
    def __init__(__self__, resource_name, opts=None, access_policies=None, enabled_for_deployment=None, enabled_for_disk_encryption=None, enabled_for_template_deployment=None, location=None, name=None, network_acls=None, purge_protection_enabled=None, resource_group_name=None, sku_name=None, soft_delete_enabled=None, tags=None, tenant_id=None, __props__=None, __name__=None, __opts__=None):
        """
        Manages a Key Vault.
        ## Disclaimers
        > **Note:** It's possible to define Key Vault Access Policies both within the `keyvault.KeyVault` resource via the `access_policy` block and by using the `keyvault.AccessPolicy` resource. However it's not possible to use both methods to manage Access Policies within a KeyVault, since there'll be conflicts.
        > **Note:** This provider will automatically recover a soft-deleted Key Vault during Creation if one is found - you can opt out of this using the `features` configuration within the Provider configuration block.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        current = azure.core.get_client_config()
        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US")
        example_key_vault = azure.keyvault.KeyVault("exampleKeyVault",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            enabled_for_disk_encryption=True,
            tenant_id=current.tenant_id,
            soft_delete_enabled=True,
            purge_protection_enabled=False,
            sku_name="standard",
            access_policy=[{
                "tenantId": current.tenant_id,
                "objectId": current.object_id,
                "keyPermissions": ["get"],
                "secretPermissions": ["get"],
                "storagePermissions": ["get"],
            }],
            network_acls={
                "defaultAction": "Deny",
                "bypass": "AzureServices",
            },
            tags={
                "environment": "Testing",
            })
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[list] access_policies: A list of up to 16 objects describing access policies, as described below.
        :param pulumi.Input[bool] enabled_for_deployment: Boolean flag to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. Defaults to `false`.
        :param pulumi.Input[bool] enabled_for_disk_encryption: Boolean flag to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. Defaults to `false`.
        :param pulumi.Input[bool] enabled_for_template_deployment: Boolean flag to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault. Defaults to `false`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] network_acls: A `network_acls` block as defined below.
        :param pulumi.Input[bool] purge_protection_enabled: Is Purge Protection enabled for this Key Vault? Defaults to `false`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[str] sku_name: The Name of the SKU used for this Key Vault. Possible values are `standard` and `premium`.
        :param pulumi.Input[bool] soft_delete_enabled: Should Soft Delete be enabled for this Key Vault? Defaults to `false`.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[str] tenant_id: The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
        The **access_policies** object supports the following:
        * `application_id` (`pulumi.Input[str]`) - The object ID of an Application in Azure Active Directory.
        * `certificate_permissions` (`pulumi.Input[list]`) - List of certificate permissions, must be one or more from the following: `backup`, `create`, `delete`, `deleteissuers`, `get`, `getissuers`, `import`, `list`, `listissuers`, `managecontacts`, `manageissuers`, `purge`, `recover`, `restore`, `setissuers` and `update`.
        * `key_permissions` (`pulumi.Input[list]`) - List of key permissions, must be one or more from the following: `backup`, `create`, `decrypt`, `delete`, `encrypt`, `get`, `import`, `list`, `purge`, `recover`, `restore`, `sign`, `unwrapKey`, `update`, `verify` and `wrapKey`.
        * `object_id` (`pulumi.Input[str]`) - The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault. The object ID must be unique for the list of access policies.
        * `secret_permissions` (`pulumi.Input[list]`) - List of secret permissions, must be one or more from the following: `backup`, `delete`, `get`, `list`, `purge`, `recover`, `restore` and `set`.
        * `storage_permissions` (`pulumi.Input[list]`) - List of storage permissions, must be one or more from the following: `backup`, `delete`, `deletesas`, `get`, `getsas`, `list`, `listsas`, `purge`, `recover`, `regeneratekey`, `restore`, `set`, `setsas` and `update`.
        * `tenant_id` (`pulumi.Input[str]`) - The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. Must match the `tenant_id` used above.
        The **network_acls** object supports the following:
        * `bypass` (`pulumi.Input[str]`) - Specifies which traffic can bypass the network rules. Possible values are `AzureServices` and `None`.
        * `default_action` (`pulumi.Input[str]`) - The Default Action to use when no rules match from `ip_rules` / `virtual_network_subnet_ids`. Possible values are `Allow` and `Deny`.
        * `ip_rules` (`pulumi.Input[list]`) - One or more IP Addresses, or CIDR Blocks which should be able to access the Key Vault.
        * `virtual_network_subnet_ids` (`pulumi.Input[list]`) - One or more Subnet ID's which should be able to access this Key Vault.
        """
        # Legacy __name__/__opts__ keyword arguments are still honored but
        # warn, mirroring the other generated Pulumi resources.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id means we are creating a new resource, so assemble and
        # validate the input property bag; with an id, __props__ is supplied
        # by get() instead.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['access_policies'] = access_policies
            __props__['enabled_for_deployment'] = enabled_for_deployment
            __props__['enabled_for_disk_encryption'] = enabled_for_disk_encryption
            __props__['enabled_for_template_deployment'] = enabled_for_template_deployment
            __props__['location'] = location
            __props__['name'] = name
            __props__['network_acls'] = network_acls
            __props__['purge_protection_enabled'] = purge_protection_enabled
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if sku_name is None:
                raise TypeError("Missing required property 'sku_name'")
            __props__['sku_name'] = sku_name
            __props__['soft_delete_enabled'] = soft_delete_enabled
            __props__['tags'] = tags
            if tenant_id is None:
                raise TypeError("Missing required property 'tenant_id'")
            __props__['tenant_id'] = tenant_id
            __props__['vault_uri'] = None
        super(KeyVault, __self__).__init__(
            'azure:keyvault/keyVault:KeyVault',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, access_policies=None, enabled_for_deployment=None, enabled_for_disk_encryption=None, enabled_for_template_deployment=None, location=None, name=None, network_acls=None, purge_protection_enabled=None, resource_group_name=None, sku_name=None, soft_delete_enabled=None, tags=None, tenant_id=None, vault_uri=None):
        """
        Get an existing KeyVault resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[list] access_policies: A list of up to 16 objects describing access policies, as described below.
        :param pulumi.Input[bool] enabled_for_deployment: Boolean flag to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. Defaults to `false`.
        :param pulumi.Input[bool] enabled_for_disk_encryption: Boolean flag to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. Defaults to `false`.
        :param pulumi.Input[bool] enabled_for_template_deployment: Boolean flag to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault. Defaults to `false`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] network_acls: A `network_acls` block as defined below.
        :param pulumi.Input[bool] purge_protection_enabled: Is Purge Protection enabled for this Key Vault? Defaults to `false`.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Key Vault. Changing this forces a new resource to be created.
        :param pulumi.Input[str] sku_name: The Name of the SKU used for this Key Vault. Possible values are `standard` and `premium`.
        :param pulumi.Input[bool] soft_delete_enabled: Should Soft Delete be enabled for this Key Vault? Defaults to `false`.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[str] tenant_id: The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
        :param pulumi.Input[str] vault_uri: The URI of the Key Vault, used for performing operations on keys and secrets.
        The **access_policies** object supports the following:
        * `application_id` (`pulumi.Input[str]`) - The object ID of an Application in Azure Active Directory.
        * `certificate_permissions` (`pulumi.Input[list]`) - List of certificate permissions, must be one or more from the following: `backup`, `create`, `delete`, `deleteissuers`, `get`, `getissuers`, `import`, `list`, `listissuers`, `managecontacts`, `manageissuers`, `purge`, `recover`, `restore`, `setissuers` and `update`.
        * `key_permissions` (`pulumi.Input[list]`) - List of key permissions, must be one or more from the following: `backup`, `create`, `decrypt`, `delete`, `encrypt`, `get`, `import`, `list`, `purge`, `recover`, `restore`, `sign`, `unwrapKey`, `update`, `verify` and `wrapKey`.
        * `object_id` (`pulumi.Input[str]`) - The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault. The object ID must be unique for the list of access policies.
        * `secret_permissions` (`pulumi.Input[list]`) - List of secret permissions, must be one or more from the following: `backup`, `delete`, `get`, `list`, `purge`, `recover`, `restore` and `set`.
        * `storage_permissions` (`pulumi.Input[list]`) - List of storage permissions, must be one or more from the following: `backup`, `delete`, `deletesas`, `get`, `getsas`, `list`, `listsas`, `purge`, `recover`, `regeneratekey`, `restore`, `set`, `setsas` and `update`.
        * `tenant_id` (`pulumi.Input[str]`) - The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. Must match the `tenant_id` used above.
        The **network_acls** object supports the following:
        * `bypass` (`pulumi.Input[str]`) - Specifies which traffic can bypass the network rules. Possible values are `AzureServices` and `None`.
        * `default_action` (`pulumi.Input[str]`) - The Default Action to use when no rules match from `ip_rules` / `virtual_network_subnet_ids`. Possible values are `Allow` and `Deny`.
        * `ip_rules` (`pulumi.Input[list]`) - One or more IP Addresses, or CIDR Blocks which should be able to access the Key Vault.
        * `virtual_network_subnet_ids` (`pulumi.Input[list]`) - One or more Subnet ID's which should be able to access this Key Vault.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["access_policies"] = access_policies
        __props__["enabled_for_deployment"] = enabled_for_deployment
        __props__["enabled_for_disk_encryption"] = enabled_for_disk_encryption
        __props__["enabled_for_template_deployment"] = enabled_for_template_deployment
        __props__["location"] = location
        __props__["name"] = name
        __props__["network_acls"] = network_acls
        __props__["purge_protection_enabled"] = purge_protection_enabled
        __props__["resource_group_name"] = resource_group_name
        __props__["sku_name"] = sku_name
        __props__["soft_delete_enabled"] = soft_delete_enabled
        __props__["tags"] = tags
        __props__["tenant_id"] = tenant_id
        __props__["vault_uri"] = vault_uri
        return KeyVault(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map provider camelCase property names to snake_case for Python users.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Inverse mapping: snake_case Python inputs back to camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 72.69145
| 392
| 0.695561
| 2,559
| 19,554
| 5.128957
| 0.113716
| 0.041067
| 0.032914
| 0.018286
| 0.802819
| 0.791771
| 0.786438
| 0.775162
| 0.754743
| 0.750857
| 0
| 0.000452
| 0.208346
| 19,554
| 268
| 393
| 72.962687
| 0.847416
| 0.525468
| 0
| 0.023256
| 1
| 0
| 0.167071
| 0.052727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0.011628
| 0.069767
| 0.023256
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aac59a8cadfe055e86a53844866466417b6a3474
| 1,937
|
py
|
Python
|
src/c3nav/mapdata/migrations/0063_auto_20170508_1404.py
|
bate/c3nav
|
9a86dd3eaeb3a10af3c5fa869575ed1e9300465a
|
[
"Apache-2.0"
] | 1
|
2021-07-07T06:16:40.000Z
|
2021-07-07T06:16:40.000Z
|
src/c3nav/mapdata/migrations/0063_auto_20170508_1404.py
|
0ki/c3nav
|
18fdb34b3fbcf7eb4617794750494cfa16428c54
|
[
"Apache-2.0"
] | null | null | null |
src/c3nav/mapdata/migrations/0063_auto_20170508_1404.py
|
0ki/c3nav
|
18fdb34b3fbcf7eb4617794750494cfa16428c54
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-08 14:04
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Switch every map-geometry column to a typed GeometryField.

    Each model gets the same ``AlterField`` on its ``geometry`` column;
    only the geometry type (polygon vs. polyline) differs per model, so
    the operations are generated from a (model, geomtype) table.
    """

    dependencies = [
        ('mapdata', '0062_auto_20170508_1400'),
    ]

    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='geometry',
            field=c3nav.mapdata.fields.GeometryField(geomtype=geomtype),
        )
        for model_name, geomtype in (
            ('arealocation', 'polygon'),
            ('building', 'polygon'),
            ('door', 'polygon'),
            ('hole', 'polygon'),
            ('lineobstacle', 'polyline'),
            ('obstacle', 'polygon'),
            ('space', 'polygon'),
            ('stair', 'polyline'),
            ('stuffedarea', 'polygon'),
        )
    ]
| 31.241935
| 74
| 0.581311
| 166
| 1,937
| 6.680723
| 0.307229
| 0.108206
| 0.162308
| 0.235347
| 0.722272
| 0.722272
| 0.722272
| 0.722272
| 0.722272
| 0.665464
| 0
| 0.031618
| 0.297883
| 1,937
| 61
| 75
| 31.754098
| 0.783824
| 0.035106
| 0
| 0.666667
| 1
| 0
| 0.126474
| 0.012326
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ab544fc3881ea7aea6c9dd7f7280a01ff88cc26
| 337
|
py
|
Python
|
investments/contrib/payments/constants.py
|
gatsinski/investments
|
4b903d6d9379993501e0c5fff2a93ddcaa2437ea
|
[
"MIT"
] | null | null | null |
investments/contrib/payments/constants.py
|
gatsinski/investments
|
4b903d6d9379993501e0c5fff2a93ddcaa2437ea
|
[
"MIT"
] | null | null | null |
investments/contrib/payments/constants.py
|
gatsinski/investments
|
4b903d6d9379993501e0c5fff2a93ddcaa2437ea
|
[
"MIT"
] | null | null | null |
from django.utils.translation import gettext_lazy as _
# Dividend payment period identifiers (stored values).
# NOTE: ``MOHTLY`` is a historical typo of "MONTHLY"; it is kept because it
# is a public name other modules may already import.  New code should use
# the correctly spelled ``MONTHLY`` alias below.
MOHTLY = "monthly"
MONTHLY = MOHTLY  # preferred, correctly spelled alias for MOHTLY
QUARTERLY = "quarterly"
SEMIANNUAL = "semiannual"
ANNUAL = "annual"
SPECIAL = "special"
# Django ``choices`` tuple mapping stored period values to translatable
# labels.  The labels must stay literal ``_( "...")`` calls so gettext's
# message extraction can find them — do not generate them in a loop.
DIVIDEND_TYPES = (
    (MOHTLY, _("Monthly")),
    (QUARTERLY, _("Quarterly")),
    (SEMIANNUAL, _("Semiannual")),
    (ANNUAL, _("Annual")),
    (SPECIAL, _("Special")),
)
| 21.0625
| 54
| 0.652819
| 30
| 337
| 7.066667
| 0.533333
| 0.122642
| 0.207547
| 0.292453
| 0.726415
| 0.726415
| 0.726415
| 0.726415
| 0.726415
| 0.726415
| 0
| 0
| 0.169139
| 337
| 15
| 55
| 22.466667
| 0.757143
| 0
| 0
| 0
| 0
| 0
| 0.231454
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2af26b30db0c70beb52cd99739e7056e0d3d0838
| 9,257
|
py
|
Python
|
cellpack/mgl_tools/DejaVu/PropertyEditor.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
cellpack/mgl_tools/DejaVu/PropertyEditor.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | 21
|
2021-10-02T00:07:05.000Z
|
2022-03-30T00:02:10.000Z
|
cellpack/mgl_tools/DejaVu/PropertyEditor.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#############################################################################
#
# Author: Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2000
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/DejaVu/PropertyEditor.py,v 1.5 2007/07/24 17:30:41 vareille Exp $
#
# $Id: PropertyEditor.py,v 1.5 2007/07/24 17:30:41 vareille Exp $
#
import tkinter
import cellpack.mgl_tools.oldnumeric as Numeric
from . import Slider, ColorChooser, colorTool
from .EventHandler import CallbackFunctions
class MaterialEditor(CallbackFunctions):
    """Tk widget for editing an OpenGL-style material.

    Four radio buttons select the component being edited (ambient,
    diffuse, specular, emission); a colour wheel edits the selected
    component's RGB value and a slider edits the shininess.  Registered
    callbacks are invoked as ``f(property_name, value)`` on every change.
    """

    # Component names, indexed by the radio-button value stored in
    # ``currentComponent``.
    property = ["ambient", "diffuse", "specular", "emission"]

    def Callbacks(self, event=None):
        """Dispatch a change event to all registered callbacks.

        The shininess slider calls this with a float; the colour chooser
        calls it with anything else (a Tk event or None).
        """
        if isinstance(event, float):
            # Slider change: report the new shininess value.
            for f in self.callbacks:
                f("shininess", event)
        else:
            # Colour-wheel change: update the currently selected component.
            tkrgb = self.colorChooser.hsWheel.Get(mode="TkRGB")
            c = self.currentComponent.get()
            self.mat[c] = self.colorChooser.hsWheel.Get(mode="RGB")[:3]
            self.colw[c].config(background=tkrgb)
            for f in self.callbacks:
                f(self.property[c], self.mat[c])

    def RestoreColor(self):
        """Set the wheel cursor to the color of the current component"""
        c = self.currentComponent.get()
        self.colorChooser.Set(self.mat[c], "RGB")

    def Set(self, ambi=None, diff=None, spec=None, emis=None, shini=None, mode="RGB"):
        """Set the material editor to a given material.

        Any of the four colour arguments may be given in a form accepted
        by ``colorTool.OneColor``; ``shini`` sets the shininess slider.
        ``mode`` must be "RGB" or "HSV".
        """
        assert mode in ("HSV", "RGB")
        current = self.currentComponent.get()
        # The four components share identical update logic (the original
        # code repeated this block once per component).
        for index, color in enumerate((ambi, diff, spec, emis)):
            if color:
                color = colorTool.OneColor(color)
                self.colw[index].config(background=colorTool.TkColor(color[:3]))
                self.mat[index][:3] = color[:3]
                if current == index:
                    # Keep the wheel in sync with the visible component.
                    self.colorChooser.Set(color, "RGB")
        if shini:
            self.shini.Set(shini)

    def __init__(self, root=None, colorChooser=None):
        """Build the editor widgets inside *root*.

        If *colorChooser* is None a private ColorChooser is created and
        gridded inside this editor's frame; otherwise the given chooser
        is shared and only a callback is registered on it.
        """
        CallbackFunctions.__init__(self)
        self.frame = tkinter.Frame(root, relief=tkinter.RIDGE, borderwidth=3)
        # Current material: one RGB row per component, initialised to white.
        self.mat = Numeric.ones((4, 3), "f")
        self.currentComponent = tkinter.IntVar()
        self.currentComponent.set(0)
        width = 9
        # One radio button per component; the button's value is the
        # component index (the four buttons were originally built with
        # four copy-pasted constructor calls).
        self.colw = []
        for value, (text, row, column) in enumerate(
            [
                ("Ambient", 0, 0),
                ("Diffuse", 1, 0),
                ("Specular", 0, 1),
                ("Emissive", 1, 1),
            ]
        ):
            button = tkinter.Radiobutton(
                self.frame,
                text=text,
                variable=self.currentComponent,
                value=value,
                command=self.RestoreColor,
                relief=tkinter.SUNKEN,
                width=width,
                borderwidth=3,
                background="#FFFFFF",
                foreground="#000000",
            )
            button.grid(row=row, column=column)
            self.colw.append(button)
        self.shini = Slider.Slider(
            self.frame,
            label="Shininess",
            immediate=0,
            minval=0.0,
            maxval=128.0,
            init=30.0,
        )
        self.shini.frame.grid(row=2, column=0, columnspan=2)
        self.shini.AddCallback(self.Callbacks)
        if not colorChooser:
            self.colorChooser = ColorChooser.ColorChooser(self.frame)
            self.colorChooser.frame.grid(row=3, column=0, columnspan=2)
        else:
            self.colorChooser = colorChooser
        self.colorChooser.AddCallback(self.Callbacks)
class LightColorEditor(CallbackFunctions):
    """Tk widget for editing a light source's colour.

    Three radio buttons select the component being edited (ambient,
    diffuse, specular); a colour wheel edits the selected component's
    RGB value.  Registered callbacks are invoked as
    ``f(property_name, value)`` on every change.
    """

    # Component names, indexed by the radio-button value stored in
    # ``currentComponent``.
    property = ["ambient", "diffuse", "specular"]

    def Callbacks(self, event=None):
        """Propagate a colour-wheel change to all registered callbacks."""
        tkrgb = self.colorChooser.hsWheel.Get(mode="TkRGB")
        c = self.currentComponent.get()
        self.mat[c] = self.colorChooser.hsWheel.Get(mode="RGB")[:3]
        self.colw[c].config(background=tkrgb)
        for f in self.callbacks:
            f(self.property[c], self.mat[c])

    def RestoreColor(self):
        """Set the wheel cursor to the color of the current component"""
        c = self.currentComponent.get()
        self.colorChooser.Set(self.mat[c], "RGB")

    def Set(self, ambi=None, diff=None, spec=None, mode="RGB"):
        """Set the editor to the given light colour components.

        Any of the three colour arguments may be given in a form accepted
        by ``colorTool.OneColor``.  ``mode`` must be "RGB" or "HSV".
        """
        assert mode in ("HSV", "RGB")
        current = self.currentComponent.get()
        # The three components share identical update logic (the original
        # code repeated this block once per component).
        for index, color in enumerate((ambi, diff, spec)):
            if color:
                color = colorTool.OneColor(color)
                self.colw[index].config(background=colorTool.TkColor(color[:3]))
                self.mat[index][:3] = color[:3]
                if current == index:
                    # Keep the wheel in sync with the visible component.
                    self.colorChooser.Set(color, "RGB")

    def __init__(self, root=None, colorChooser=None):
        """Build the editor widgets inside *root*.

        If *colorChooser* is None a private ColorChooser is created and
        gridded inside this editor's frame; otherwise the given chooser
        is shared and only a callback is registered on it.
        """
        CallbackFunctions.__init__(self)
        self.frame = tkinter.Frame(root, relief=tkinter.RIDGE, borderwidth=3)
        # Current light colour: one RGB row per component.
        self.mat = Numeric.ones((3, 3), "f")
        self.currentComponent = tkinter.IntVar()
        self.currentComponent.set(0)
        width = 9
        # NOTE: the list has 4 slots but only the first 3 are used; the
        # historical length is kept in case callers inspect ``colw``.
        self.colw = [
            0,
        ] * 4
        for value, (text, row, column) in enumerate(
            [
                ("Ambient", 0, 0),
                ("Diffuse", 1, 0),
                ("Specular", 0, 1),
            ]
        ):
            button = tkinter.Radiobutton(
                self.frame,
                text=text,
                variable=self.currentComponent,
                value=value,
                command=self.RestoreColor,
                relief=tkinter.SUNKEN,
                width=width,
                borderwidth=3,
                background="#FFFFFF",
                foreground="#000000",
            )
            button.grid(row=row, column=column)
            self.colw[value] = button
        if not colorChooser:
            self.colorChooser = ColorChooser.ColorChooser(self.frame)
            # Row 3 matches MaterialEditor's layout (row 2 holds its
            # shininess slider; this editor has none).
            self.colorChooser.frame.grid(row=3, column=0, columnspan=2)
        else:
            self.colorChooser = colorChooser
        self.colorChooser.AddCallback(self.Callbacks)
if __name__ == "__main__":
    # Small manual demo: show both editors stacked in one window.
    root = tkinter.Tk()
    root.title("Material Editor")
    material_editor = MaterialEditor(root)
    material_editor.frame.pack()
    light_editor = LightColorEditor(root)
    light_editor.frame.pack()
| 32.710247
| 110
| 0.538295
| 986
| 9,257
| 5.028398
| 0.151116
| 0.040339
| 0.016337
| 0.035296
| 0.817063
| 0.790641
| 0.786608
| 0.786608
| 0.766035
| 0.766035
| 0
| 0.032894
| 0.32019
| 9,257
| 282
| 111
| 32.826241
| 0.754966
| 0.06676
| 0
| 0.781659
| 0
| 0
| 0.036282
| 0
| 0
| 0
| 0
| 0
| 0.008734
| 1
| 0.034935
| false
| 0
| 0.017467
| 0
| 0.069869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2af55bf812268cf975fc4696a9607d6dd842fea9
| 115
|
py
|
Python
|
pylinky/__init__.py
|
LudovicRousseau/pyLinky
|
62bbcc8355167014d4e763d7c5de6faaf0dd39ef
|
[
"Apache-2.0"
] | 42
|
2018-06-18T14:55:11.000Z
|
2021-09-16T20:56:51.000Z
|
pylinky/__init__.py
|
LudovicRousseau/pyLinky
|
62bbcc8355167014d4e763d7c5de6faaf0dd39ef
|
[
"Apache-2.0"
] | 27
|
2018-04-27T07:51:07.000Z
|
2020-10-03T19:20:37.000Z
|
pylinky/__init__.py
|
LudovicRousseau/pyLinky
|
62bbcc8355167014d4e763d7c5de6faaf0dd39ef
|
[
"Apache-2.0"
] | 27
|
2018-01-27T22:48:51.000Z
|
2020-07-21T22:12:47.000Z
|
from pylinky.client import AbstractAuth
from pylinky.client import LinkyAPI
from pylinky.client import LinkyClient
| 28.75
| 39
| 0.869565
| 15
| 115
| 6.666667
| 0.466667
| 0.33
| 0.51
| 0.69
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104348
| 115
| 3
| 40
| 38.333333
| 0.970874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
2aff5dedb9c340f9f6a96e1eedfd2c84fc2cfef2
| 101
|
py
|
Python
|
project/__init__.py
|
phiratio/django-forums-app
|
a8d50b436bc34f74ab8c58234f5f7cf5175e00c5
|
[
"MIT"
] | null | null | null |
project/__init__.py
|
phiratio/django-forums-app
|
a8d50b436bc34f74ab8c58234f5f7cf5175e00c5
|
[
"MIT"
] | null | null | null |
project/__init__.py
|
phiratio/django-forums-app
|
a8d50b436bc34f74ab8c58234f5f7cf5175e00c5
|
[
"MIT"
] | null | null | null |
# Expose the Celery app at package import time so that task decorators
# and workers can find it via the conventional ``celery_app`` name.
from .celery import app as celery_app

__all__ = ['celery_app']
| 33.666667
| 38
| 0.782178
| 17
| 101
| 4.235294
| 0.352941
| 0.375
| 0.444444
| 0.527778
| 0.833333
| 0.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.148515
| 101
| 3
| 39
| 33.666667
| 0.837209
| 0.356436
| 0
| 0
| 0
| 0
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
63028d028aaafa4ccfda566656125ddeaaeb9fae
| 28,164
|
py
|
Python
|
api/migrations/0001_initial.py
|
evan-rusin/fly-project
|
8afc697f2a9fb63317cca2763ed0ed76f9ef2ead
|
[
"BSD-2-Clause"
] | 15
|
2016-11-17T08:34:52.000Z
|
2021-11-12T07:08:58.000Z
|
api/migrations/0001_initial.py
|
evan-rusin/fly-project
|
8afc697f2a9fb63317cca2763ed0ed76f9ef2ead
|
[
"BSD-2-Clause"
] | 137
|
2015-12-07T19:48:03.000Z
|
2016-10-11T20:19:33.000Z
|
api/migrations/0001_initial.py
|
evan-rusin/fly-project
|
8afc697f2a9fb63317cca2763ed0ed76f9ef2ead
|
[
"BSD-2-Clause"
] | 11
|
2016-10-21T22:43:54.000Z
|
2021-08-28T14:41:02.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-24 18:47
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.AutoField(db_index=True, primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Badge'), (2, 'Goal Badge'), (3, 'Education Badge'), (4, 'Resource Badge')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4)])),
('icon', models.CharField(blank=True, max_length=31, null=True)),
('colour', models.CharField(blank=True, max_length=31, null=True)),
('level', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(9999)])),
('title', models.CharField(blank=True, max_length=63, null=True)),
('title_en', models.CharField(blank=True, max_length=63, null=True)),
('title_fr', models.CharField(blank=True, max_length=63, null=True)),
('title_es', models.CharField(blank=True, max_length=63, null=True)),
('description', models.CharField(blank=True, max_length=511, null=True)),
('description_en', models.CharField(blank=True, max_length=511, null=True)),
('description_fr', models.CharField(blank=True, max_length=511, null=True)),
('description_es', models.CharField(blank=True, max_length=511, null=True)),
('has_xp_requirement', models.BooleanField(default=True)),
('required_xp', models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(9999)])),
],
options={
'db_table': 'fly_badges',
},
),
migrations.CreateModel(
name='BannedDomain',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(db_index=True, max_length=63, unique=True)),
('banned_on', models.DateField(auto_now_add=True, null=True)),
('reason', models.CharField(blank=True, max_length=127, null=True)),
],
options={
'db_table': 'fly_banned_domains',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='BannedIP',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('address', models.GenericIPAddressField(db_index=True, unique=True)),
('banned_on', models.DateField(auto_now_add=True, null=True)),
('reason', models.CharField(blank=True, max_length=127, null=True)),
],
options={
'db_table': 'fly_banned_ips',
'ordering': ('address',),
},
),
migrations.CreateModel(
name='BannedWord',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('text', models.CharField(db_index=True, max_length=63, unique=True)),
('banned_on', models.DateField(auto_now_add=True, null=True)),
('reason', models.CharField(blank=True, max_length=127, null=True)),
],
options={
'db_table': 'fly_banned_words',
'ordering': ('text',),
},
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(db_index=True, primary_key=True, serialize=False)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Savings'), (2, 'Credit'), (3, 'Goal')], db_index=True, default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)])),
('image', models.CharField(blank=True, max_length=63, null=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('title', models.CharField(blank=True, max_length=63, null=True)),
('title_en', models.CharField(blank=True, max_length=63, null=True)),
('title_fr', models.CharField(blank=True, max_length=63, null=True)),
('title_es', models.CharField(blank=True, max_length=63, null=True)),
('summary', models.CharField(blank=True, max_length=255, null=True)),
('summary_en', models.CharField(blank=True, max_length=255, null=True)),
('summary_fr', models.CharField(blank=True, max_length=255, null=True)),
('summary_es', models.CharField(blank=True, max_length=255, null=True)),
('description', models.CharField(blank=True, max_length=511, null=True)),
('description_en', models.CharField(blank=True, max_length=511, null=True)),
('description_fr', models.CharField(blank=True, max_length=511, null=True)),
('description_es', models.CharField(blank=True, max_length=511, null=True)),
('video_url', models.URLField(blank=True, null=True)),
('duration', models.PositiveSmallIntegerField(choices=[(5, '5 Minutes'), (30, '30 Minutes'), (60, '1 Hour')], default=5, validators=[django.core.validators.MinValueValidator(5), django.core.validators.MaxValueValidator(60)])),
('awarded_xp', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(9999)])),
('has_prerequisites', models.BooleanField(db_index=True, default=False)),
('prerequisites', models.ManyToManyField(blank=True, related_name='_course_prerequisites_+', to='api.Course')),
],
options={
'db_table': 'fly_courses',
},
),
migrations.CreateModel(
name='CreditGoal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('is_locked', models.BooleanField(default=False)),
('goal_type', models.PositiveSmallIntegerField(blank=True, db_index=True, default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(3)])),
('unlocks', models.DateTimeField(blank=True, null=True)),
('is_closed', models.BooleanField(db_index=True, default=False)),
('was_accomplished', models.BooleanField(db_index=True, default=False)),
('earned_xp', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(9999)])),
('points', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(850)])),
('times', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(99)])),
('period', models.PositiveSmallIntegerField(choices=[(1, 'Weeks'), (2, 'Months')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_credit_goals',
},
),
migrations.CreateModel(
name='EnrolledCourse',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('finished', models.DateTimeField(blank=True, null=True)),
('is_finished', models.BooleanField(default=False)),
('final_mark', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Course')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_enrolled_courses',
},
),
migrations.CreateModel(
name='FinalGoal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('is_locked', models.BooleanField(default=False)),
('goal_type', models.PositiveSmallIntegerField(blank=True, db_index=True, default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(3)])),
('unlocks', models.DateTimeField(blank=True, null=True)),
('is_closed', models.BooleanField(db_index=True, default=False)),
('was_accomplished', models.BooleanField(db_index=True, default=False)),
('earned_xp', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(9999)])),
('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('for_want', models.PositiveSmallIntegerField(choices=[(1, 'House'), (2, 'Business'), (3, 'Vacation'), (4, 'Retirement'), (5, 'General Savings'), (6, 'Other')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(6)])),
('for_other_want', models.CharField(blank=True, default='', max_length=63, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_final_goals',
},
),
migrations.CreateModel(
name='ImageUpload',
fields=[
('upload_id', models.AutoField(primary_key=True, serialize=False)),
('upload_date', models.DateField(auto_now=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='upload')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_image_uploads',
},
),
migrations.CreateModel(
name='Me',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('avatar', models.ImageField(blank=True, null=True, upload_to='upload')),
('xp', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(99999)])),
('xp_percent', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('wants_newsletter', models.BooleanField(default=False)),
('wants_goal_notify', models.BooleanField(default=False)),
('wants_course_notify', models.BooleanField(default=False)),
('wants_resource_notify', models.BooleanField(default=False)),
('badges', models.ManyToManyField(blank=True, related_name='fly_user_badges', to='api.Badge')),
('courses', models.ManyToManyField(blank=True, related_name='fly_user_enrolled_courses', to='api.EnrolledCourse')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_mes',
},
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Level Up Notifiction'), (2, 'New Badge Notification'), (3, 'Custom Notification')], db_index=True, default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)])),
('title', models.CharField(blank=True, max_length=511, null=True)),
('description', models.CharField(blank=True, max_length=511, null=True)),
('badge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Badge')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_notifications',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('num', models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(9999)])),
('text', models.CharField(blank=True, max_length=511, null=True)),
('text_en', models.CharField(blank=True, max_length=511, null=True)),
('text_fr', models.CharField(blank=True, max_length=511, null=True)),
('text_es', models.CharField(blank=True, max_length=511, null=True)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Open-Ended'), (2, 'Partial'), (3, 'All-or-None')], db_index=True, default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)])),
('a', models.CharField(blank=True, max_length=255, null=True)),
('a_en', models.CharField(blank=True, max_length=255, null=True)),
('a_fr', models.CharField(blank=True, max_length=255, null=True)),
('a_es', models.CharField(blank=True, max_length=255, null=True)),
('a_is_correct', models.BooleanField(default=False)),
('b', models.CharField(blank=True, max_length=255, null=True)),
('b_en', models.CharField(blank=True, max_length=255, null=True)),
('b_fr', models.CharField(blank=True, max_length=255, null=True)),
('b_es', models.CharField(blank=True, max_length=255, null=True)),
('b_is_correct', models.BooleanField(default=False)),
('c', models.CharField(blank=True, max_length=255, null=True)),
('c_en', models.CharField(blank=True, max_length=255, null=True)),
('c_fr', models.CharField(blank=True, max_length=255, null=True)),
('c_es', models.CharField(blank=True, max_length=255, null=True)),
('c_is_correct', models.BooleanField(default=False)),
('d', models.CharField(blank=True, max_length=255, null=True)),
('d_en', models.CharField(blank=True, max_length=255, null=True)),
('d_fr', models.CharField(blank=True, max_length=255, null=True)),
('d_es', models.CharField(blank=True, max_length=255, null=True)),
('d_is_correct', models.BooleanField(default=False)),
('e', models.CharField(blank=True, max_length=255, null=True)),
('e_en', models.CharField(blank=True, max_length=255, null=True)),
('e_fr', models.CharField(blank=True, max_length=255, null=True)),
('e_es', models.CharField(blank=True, max_length=255, null=True)),
('e_is_correct', models.BooleanField(default=False)),
('f', models.CharField(blank=True, max_length=255, null=True)),
('f_en', models.CharField(blank=True, max_length=255, null=True)),
('f_fr', models.CharField(blank=True, max_length=255, null=True)),
('f_es', models.CharField(blank=True, max_length=255, null=True)),
('f_is_correct', models.BooleanField(default=False)),
],
options={
'db_table': 'fly_questions',
},
),
migrations.CreateModel(
name='QuestionSubmission',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Open-Ended'), (2, 'Partial'), (3, 'All-or-None')], db_index=True, default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)])),
('a', models.BooleanField(default=False)),
('b', models.BooleanField(default=False)),
('c', models.BooleanField(default=False)),
('d', models.BooleanField(default=False)),
('e', models.BooleanField(default=False)),
('f', models.BooleanField(default=False)),
('mark', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Question')),
],
options={
'db_table': 'fly_question_submissions',
},
),
migrations.CreateModel(
name='Quiz',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, max_length=63, null=True)),
('title_en', models.CharField(blank=True, max_length=63, null=True)),
('title_fr', models.CharField(blank=True, max_length=63, null=True)),
('title_es', models.CharField(blank=True, max_length=63, null=True)),
('description', models.CharField(blank=True, max_length=511, null=True)),
('description_en', models.CharField(blank=True, max_length=511, null=True)),
('description_fr', models.CharField(blank=True, max_length=511, null=True)),
('description_es', models.CharField(blank=True, max_length=511, null=True)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Course')),
],
options={
'db_table': 'fly_quizzes',
},
),
migrations.CreateModel(
name='QuizSubmission',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('finished', models.DateTimeField(blank=True, null=True)),
('is_finished', models.BooleanField(default=False)),
('final_mark', models.FloatField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Course')),
('quiz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Quiz')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_quiz_submissions',
},
),
migrations.CreateModel(
name='ResourceLink',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(max_length=127)),
('title_en', models.CharField(max_length=127, null=True)),
('title_fr', models.CharField(max_length=127, null=True)),
('title_es', models.CharField(max_length=127, null=True)),
('url', models.URLField()),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Social Media'), (2, 'Blogs'), (3, 'Other Cool Apps')], db_index=True, default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)])),
],
options={
'db_table': 'fly_resource_links',
},
),
migrations.CreateModel(
name='SavingsGoal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('is_locked', models.BooleanField(default=False)),
('goal_type', models.PositiveSmallIntegerField(blank=True, db_index=True, default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(3)])),
('unlocks', models.DateTimeField(blank=True, null=True)),
('is_closed', models.BooleanField(db_index=True, default=False)),
('was_accomplished', models.BooleanField(db_index=True, default=False)),
('earned_xp', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(9999)])),
('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('times', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(99)])),
('period', models.PositiveSmallIntegerField(choices=[(1, 'Weeks'), (2, 'Months')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(2)])),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_savings_goals',
},
),
migrations.CreateModel(
name='Share',
fields=[
('id', models.AutoField(db_index=True, primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('type', models.PositiveSmallIntegerField(choices=[(1, 'Level Up Share'), (2, 'New Badge Share'), (3, 'Custom Share')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(3)])),
('custom_title', models.CharField(blank=True, max_length=511, null=True)),
('custom_description', models.CharField(blank=True, max_length=511, null=True)),
('custom_url', models.URLField(blank=True, null=True)),
('notification_id', models.PositiveIntegerField(blank=True)),
('badge', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.Badge')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'fly_shares',
},
),
migrations.CreateModel(
name='XPLevel',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, max_length=31, null=True)),
('num', models.PositiveSmallIntegerField(choices=[(5, '5 Minutes'), (30, '30 Minutes'), (60, '1 Hour')], default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(9999)])),
('min_xp', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(9999)])),
('max_xp', models.PositiveSmallIntegerField(default=25, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(9999)])),
],
options={
'db_table': 'fly_xp_levels',
},
),
migrations.AddField(
model_name='share',
name='xplevel',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.XPLevel'),
),
migrations.AddField(
model_name='questionsubmission',
name='quiz',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Quiz'),
),
migrations.AddField(
model_name='questionsubmission',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='question',
name='quiz',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Quiz'),
),
migrations.AddField(
model_name='notification',
name='xplevel',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.XPLevel'),
),
migrations.AddField(
model_name='me',
name='xplevel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.XPLevel'),
),
migrations.CreateModel(
name='OrderedCourse',
fields=[
],
options={
'proxy': True,
'ordering': ('created',),
},
bases=('api.course',),
),
migrations.CreateModel(
name='OrderedCreditGoal',
fields=[
],
options={
'proxy': True,
'ordering': ('-created',),
},
bases=('api.creditgoal',),
),
migrations.CreateModel(
name='OrderedFinalGoal',
fields=[
],
options={
'proxy': True,
'ordering': ('-created',),
},
bases=('api.finalgoal',),
),
migrations.CreateModel(
name='OrderedQuestion',
fields=[
],
options={
'proxy': True,
'ordering': ('num',),
},
bases=('api.question',),
),
migrations.CreateModel(
name='OrderedSavingsGoal',
fields=[
],
options={
'proxy': True,
'ordering': ('-created',),
},
bases=('api.savingsgoal',),
),
]
| 60.961039
| 292
| 0.595015
| 2,817
| 28,164
| 5.813277
| 0.083777
| 0.048913
| 0.054775
| 0.099658
| 0.845445
| 0.828591
| 0.794333
| 0.782792
| 0.758305
| 0.732658
| 0
| 0.0201
| 0.254545
| 28,164
| 461
| 293
| 61.093275
| 0.759895
| 0.002379
| 0
| 0.573951
| 1
| 0
| 0.107781
| 0.00331
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011038
| 0
| 0.019868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6323a94ec9e41e9664c9046abdccf7481a4c64d1
| 7,783
|
py
|
Python
|
neutralocean/eos/jmdfwg06.py
|
geoffstanley/neutralocean
|
5e93c9732d3a64bf4c5dcb81a6d2f47839b0c6f7
|
[
"MIT"
] | 10
|
2022-03-03T16:00:01.000Z
|
2022-03-14T18:51:08.000Z
|
neutralocean/eos/jmdfwg06.py
|
geoffstanley/neutralocean
|
5e93c9732d3a64bf4c5dcb81a6d2f47839b0c6f7
|
[
"MIT"
] | null | null | null |
neutralocean/eos/jmdfwg06.py
|
geoffstanley/neutralocean
|
5e93c9732d3a64bf4c5dcb81a6d2f47839b0c6f7
|
[
"MIT"
] | null | null | null |
"""
Density of Sea Water using the Jackett et al. (2006) [1]_ function
Functions:
rho :: computes in-situ density from salinity, potential temperature and
pressure
rho_s_t :: compute the partial derivatives of in-situ density with
respect to salinity and potential temperature
rho_p :: compute the partial derivative of in-situ density with
respect to pressure
Notes:
To make Boussinesq versions of these functions, see
`neutralocean.eos.tools.make_eos_bsq`.
To make vectorized versions of these functions, see
`neutralocean.eos.tools.vectorize_eos`.
.. [1] Jackett, D. R., McDougall, T. J., Feistel, R., Wright, D. G., &
Griffies, S. M. (2006). Algorithms for Density, Potential Temperature,
Conservative Temperature, and the Freezing Temperature of Seawater.
Journal of Atmospheric and Oceanic Technology, 23(12), 1709–1728.
https://doi.org/10.1175/JTECH1946.1
History
-------
Code adapted from MOM5.1 (Griffies et al) originally written in Fortran
2022 January 14 - David Hutchinson - Translated into python
2022 January 14 - Geoff Stanley - code optimization, partial derivatives, check vals
"""
import numpy as np
import numba as nb
# Eagerly compiled by numba with an explicit float64 scalar signature:
# f8(f8, f8, f8).
@nb.njit(nb.f8(nb.f8, nb.f8, nb.f8))
def rho(s, t, p):
    """
    In-situ density of seawater from the Jackett et al. (2006) 25-term
    rational-function equation of state.

    Parameters
    ----------
    s : float
        Practical salinity [PSS-78]
    t : float
        Potential temperature [ITS-90]
    p : float
        Pressure [dbar]

    Returns
    -------
    rho : float
        In-situ density [kg m-3]
    """
    # Coefficients of the rational function rho = num / den.
    # a* are numerator coefficients, b* are denominator coefficients,
    # transcribed from Jackett et al. (2006) — do not modify.
    # fmt: off
    a0 = 9.9984085444849347e+02
    a1 = 7.3471625860981584e+00
    a2 = -5.3211231792841769e-02
    a3 = 3.6492439109814549e-04
    a4 = 2.5880571023991390e+00
    a5 = -6.7168282786692355e-03
    a6 = 1.9203202055760151e-03
    a7 = 1.1798263740430364e-02
    a8 = 9.8920219266399117e-08
    a9 = 4.6996642771754730e-06
    a10 = -2.5862187075154352e-08
    a11 = -3.2921414007960662e-12
    b0 = 1.0000000000000000e+00
    b1 = 7.2815210113327091e-03
    b2 = -4.4787265461983921e-05
    b3 = 3.3851002965802430e-07
    b4 = 1.3651202389758572e-10
    b5 = 1.7632126669040377e-03
    b6 = -8.8066583251206474e-06
    b7 = -1.8832689434804897e-10
    b8 = 5.7463776745432097e-06
    b9 = 1.4716275472242334e-09
    b10 = 6.7103246285651894e-06
    b11 = -2.4461698007024582e-17
    b12 = -9.1534417604289062e-18
    # fmt: on
    # Tiny offset added to the denominator to guard against division by zero.
    epsln = 1.0e-40
    # Precompute some commonly used terms
    t2 = t * t
    # Numerator polynomial in t, s and p, evaluated in Horner form.
    num = (
        a0
        + t * (a1 + t * (a2 + a3 * t))
        + s * (a4 + a5 * t + a6 * s)
        + p * (a7 + a8 * t2 + a9 * s + p * (a10 + a11 * t2))
    )
    # Reciprocal of the denominator polynomial (includes a sqrt(s) term).
    inv_den = 1.0 / (
        b0
        + t * (b1 + t * (b2 + t * (b3 + t * b4)))
        + s * (b5 + t * (b6 + b7 * t2) + np.sqrt(s) * (b8 + b9 * t2))
        + p * (b10 + p * t * (b11 * t2 + b12 * p))
        + epsln
    )
    return num * inv_den
# Compiled by numba without an explicit signature (the function returns a
# tuple, so the signature is inferred at first call).
@nb.njit
def rho_s_t(s, t, p):
    """
    Partial derivatives of the Jackett et al. (2006) in-situ density with
    respect to salinity and potential temperature.

    Parameters
    ----------
    s : float
        Practical salinity [PSS-78]
    t : float
        Potential temperature [ITS-90]
    p : float
        Pressure [dbar]

    Returns
    -------
    rho_s : float
        Partial derivative of in-situ density with respect to salinity [kg m-3 psu-1]
    rho_t : float
        Partial derivative of in-situ density with respect to temperature [kg m-3 degc-1]
    """
    # Coefficients of the rational function rho = num / den; identical to
    # those in `rho` and transcribed from Jackett et al. (2006) — do not modify.
    # fmt: off
    a0 = 9.9984085444849347e+02
    a1 = 7.3471625860981584e+00
    a2 = -5.3211231792841769e-02
    a3 = 3.6492439109814549e-04
    a4 = 2.5880571023991390e+00
    a5 = -6.7168282786692355e-03
    a6 = 1.9203202055760151e-03
    a7 = 1.1798263740430364e-02
    a8 = 9.8920219266399117e-08
    a9 = 4.6996642771754730e-06
    a10 = -2.5862187075154352e-08
    a11 = -3.2921414007960662e-12
    b0 = 1.0000000000000000e+00
    b1 = 7.2815210113327091e-03
    b2 = -4.4787265461983921e-05
    b3 = 3.3851002965802430e-07
    b4 = 1.3651202389758572e-10
    b5 = 1.7632126669040377e-03
    b6 = -8.8066583251206474e-06
    b7 = -1.8832689434804897e-10
    b8 = 5.7463776745432097e-06
    b9 = 1.4716275472242334e-09
    b10 = 6.7103246285651894e-06
    b11 = -2.4461698007024582e-17
    b12 = -9.1534417604289062e-18
    # fmt: on
    # Tiny offset added to the denominator to guard against division by zero.
    epsln = 1.0e-40
    # Precompute some commonly used terms
    t2 = t * t
    sp5 = np.sqrt(s)
    pt = p * t
    # Numerator and reciprocal denominator of the density rational function,
    # matching the expressions in `rho`.
    num = (
        a0
        + t * (a1 + t * (a2 + a3 * t))
        + s * (a4 + a5 * t + a6 * s)
        + p * (a7 + a8 * t2 + a9 * s + p * (a10 + a11 * t2))
    )
    inv_den = 1.0 / (
        b0
        + t * (b1 + t * (b2 + t * (b3 + t * b4)))
        + s * (b5 + t * (b6 + b7 * t2) + sp5 * (b8 + b9 * t2))
        + p * (b10 + pt * (b11 * t2 + b12 * p))
        + epsln
    )
    # The density is
    #   rho = num / den
    # Taking the partial derivative w.r.t. s gives (quotient rule)
    #   rho_s = (num_s - num * den_s / den) / den
    # and similarly for rho_t.
    num_s = a4 + a5 * t + 2.0 * a6 * s + p * a9
    num_t = (
        a1 + t * (2.0 * a2 + 3.0 * a3 * t) + a5 * s + 2.0 * a8 * pt + 2.0 * a11 * p * pt
    )
    # d/ds of s * sqrt(s) is 1.5 * sqrt(s), hence the 1.5 factors below.
    den_s = b5 + t * (b6 + b7 * t2) + sp5 * (1.5 * b8 + 1.5 * b9 * t2)
    den_t = (
        b1
        + t * (2.0 * b2 + t * (3.0 * b3 + 4.0 * b4 * t))
        + s * (b6 + 3.0 * b7 * t2 + 2.0 * b9 * sp5 * t)
        + 3.0 * b11 * pt * pt
        + b12 * p ** 3
    )
    rho_s = (num_s - num * den_s * inv_den) * inv_den
    rho_t = (num_t - num * den_t * inv_den) * inv_den
    return rho_s, rho_t
# Eagerly compiled by numba with an explicit float64 scalar signature:
# f8(f8, f8, f8).
@nb.njit(nb.f8(nb.f8, nb.f8, nb.f8))
def rho_p(s, t, p):
    """
    Partial derivative of the Jackett et al. (2006) in-situ density with
    respect to pressure.

    Parameters
    ----------
    s : float
        Practical salinity [PSS-78]
    t : float
        Potential temperature [ITS-90]
    p : float
        Pressure [dbar]

    Returns
    -------
    rho_p : float
        Partial derivative of in-situ density with respect to pressure [kg m-3 dbar-1]
    """
    # Coefficients of the rational function rho = num / den; identical to
    # those in `rho` and transcribed from Jackett et al. (2006) — do not modify.
    # fmt: off
    a0 = 9.9984085444849347e+02
    a1 = 7.3471625860981584e+00
    a2 = -5.3211231792841769e-02
    a3 = 3.6492439109814549e-04
    a4 = 2.5880571023991390e+00
    a5 = -6.7168282786692355e-03
    a6 = 1.9203202055760151e-03
    a7 = 1.1798263740430364e-02
    a8 = 9.8920219266399117e-08
    a9 = 4.6996642771754730e-06
    a10 = -2.5862187075154352e-08
    a11 = -3.2921414007960662e-12
    b0 = 1.0000000000000000e+00
    b1 = 7.2815210113327091e-03
    b2 = -4.4787265461983921e-05
    b3 = 3.3851002965802430e-07
    b4 = 1.3651202389758572e-10
    b5 = 1.7632126669040377e-03
    b6 = -8.8066583251206474e-06
    b7 = -1.8832689434804897e-10
    b8 = 5.7463776745432097e-06
    b9 = 1.4716275472242334e-09
    b10 = 6.7103246285651894e-06
    b11 = -2.4461698007024582e-17
    b12 = -9.1534417604289062e-18
    # fmt: on
    # Tiny offset added to the denominator to guard against division by zero.
    epsln = 1.0e-40
    # Precompute some commonly used terms
    t2 = t * t
    # Numerator and reciprocal denominator of the density rational function,
    # matching the expressions in `rho`.
    num = (
        a0
        + t * (a1 + t * (a2 + a3 * t))
        + s * (a4 + a5 * t + a6 * s)
        + p * (a7 + a8 * t2 + a9 * s + p * (a10 + a11 * t2))
    )
    inv_den = 1.0 / (
        b0
        + t * (b1 + t * (b2 + t * (b3 + t * b4)))
        + s * (b5 + t * (b6 + b7 * t2) + np.sqrt(s) * (b8 + b9 * t2))
        + p * (b10 + p * t * (b11 * t2 + b12 * p))
        + epsln
    )
    # The density is
    #   rho = num / den
    # Taking the partial derivative w.r.t. p gives (quotient rule)
    #   rho_p = (num_p - num * den_p / den) / den
    num_p = a7 + a8 * t2 + a9 * s + p * (2.0 * a10 + 2.0 * a11 * t2)
    den_p = b10 + p * t * (2.0 * b11 * t2 + 3.0 * b12 * p)
    return (num_p - num * den_p * inv_den) * inv_den
| 27.024306
| 89
| 0.558525
| 1,090
| 7,783
| 3.946789
| 0.194495
| 0.013947
| 0.021153
| 0.011158
| 0.750814
| 0.743375
| 0.743375
| 0.730126
| 0.698047
| 0.698047
| 0
| 0.337584
| 0.314917
| 7,783
| 287
| 90
| 27.118467
| 0.469055
| 0.332905
| 0
| 0.765101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020134
| false
| 0
| 0.013423
| 0
| 0.053691
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2d640d37aa341f6fd238012b5c62138f39effa1c
| 9,904
|
py
|
Python
|
scraper/user_agents.py
|
2knal/medium-for-all
|
8a53c8d3393f1e41519a4d5cbc74c07a89fcec97
|
[
"MIT"
] | 3
|
2020-05-31T22:34:56.000Z
|
2020-06-01T10:59:58.000Z
|
scraper/user_agents.py
|
2knal/medium-for-all
|
8a53c8d3393f1e41519a4d5cbc74c07a89fcec97
|
[
"MIT"
] | 1
|
2021-03-31T19:57:21.000Z
|
2021-03-31T19:57:21.000Z
|
scraper/user_agents.py
|
2knal/medium-for-all
|
8a53c8d3393f1e41519a4d5cbc74c07a89fcec97
|
[
"MIT"
] | null | null | null |
# Pool of browser User-Agent strings (desktop Chrome and Firefox variants)
# to rotate through when scraping, so requests look like ordinary browsers.
#
# Fix: the entry ending in "Safari/537.14" previously lacked a trailing
# comma, so Python's implicit string-literal concatenation silently fused it
# with the first Firefox entry into one garbled User-Agent.
useragents = [
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
    "Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2309.372 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1866.237 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36 Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.517 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36",
    "Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36",
    "Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36",
    "Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.15 (KHTML, like Gecko) Chrome/24.0.1295.0 Safari/537.15",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/24.0.1292.0 Safari/537.14",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1",
    "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0",
    "Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0",
    "Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0",
    "Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0",
    "Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0",
    "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0",
    "Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/23.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:23.0) Gecko/20131011 Firefox/23.0",
    "Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/22.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) Gecko/20130328 Firefox/22.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:22.0) Gecko/20130405 Firefox/22.0",
    "Mozilla/5.0 (Microsoft Windows NT 6.2.9200.0); rv:22.0) Gecko/20130405 Firefox/22.0",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:21.0.0) Gecko/20121011 Firefox/21.0.0",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (X11; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20130514 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.2; rv:21.0) Gecko/20130326 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130401 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130331 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130330 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130401 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130328 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130401 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 5.0; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64;) Gecko/20100101 Firefox/20.0",
    "Mozilla/5.0 (Windows x86; rv:19.0) Gecko/20100101 Firefox/19.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/19.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/18.0.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6"
]
| 96.15534
| 251
| 0.68649
| 1,937
| 9,904
| 3.495612
| 0.075891
| 0.030128
| 0.134249
| 0.167774
| 0.899129
| 0.889234
| 0.865899
| 0.853197
| 0.844188
| 0.787771
| 0
| 0.264699
| 0.137924
| 9,904
| 102
| 252
| 97.098039
| 0.528344
| 0
| 0
| 0
| 0
| 0.980392
| 0.91771
| 0.00212
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
937780b4ca634115490d05b7e15c4c0006ab7e12
| 7,018
|
py
|
Python
|
gluon/policies/net_l3vpn.py
|
lfntac/ipv6
|
1cf305a5fe370e71157723a40833c73aeffdf35e
|
[
"Apache-2.0"
] | null | null | null |
gluon/policies/net_l3vpn.py
|
lfntac/ipv6
|
1cf305a5fe370e71157723a40833c73aeffdf35e
|
[
"Apache-2.0"
] | null | null | null |
gluon/policies/net_l3vpn.py
|
lfntac/ipv6
|
1cf305a5fe370e71157723a40833c73aeffdf35e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from gluon.policies import base
# TODO(JinLi) This file is NOT used, it is an example of moving policy to code.
# Unlike other Openstack projects whose api has a fix set of restControllers,
# Gluon dynamically generates its restControllers from yaml files. If Gluon
# follows the policy in code approach, Gluon users will need to modify source
# code by adding similar files like this one. And then call the list_rules
# function inside the gluon.policies.__init__.py
#
# Gluon takes a different approach by defining policies inside the yaml file of
# a model, so that users do not need to modify any source code
#
# If user prefers to use plicy in code, they can use this file. And create
# similar file for new service.
# Resource collections exposed by the net-l3vpn service.  Every resource
# gets the same five CRUD-style policy rules, so the full rule set is
# generated below instead of being written out 35 times by hand.
_L3VPN_RESOURCES = (
    'dataplanetunnels',
    'bgppeerings',
    'vpnafconfigs',
    'vpnservices',
    'interfaces',
    'vpnbindings',
    'ports',
)

# Operations defined for each resource, in the order the rules are listed.
_L3VPN_OPERATIONS = ('create', 'get', 'update', 'get_one', 'delete')

# One RuleDefault per (resource, operation) pair, all restricted to admin
# or owner.  Iteration is resource-major so the resulting list order is
# identical to the original hand-written version.
net_l3vpn_policies = [
    policy.RuleDefault(
        name='net-l3vpn:%s_%s' % (operation, resource),
        check_str=base.RULE_ADMIN_OR_OWNER,
        description='net-l3vpn policy'
    )
    for resource in _L3VPN_RESOURCES
    for operation in _L3VPN_OPERATIONS
]
def list_rules():
    """Return the net-l3vpn policy rules.

    Per the module comment above, this is the entry point that would be
    called from ``gluon.policies.__init__`` if the policy-in-code approach
    were adopted.

    Returns
    -------
    list
        The module-level ``net_l3vpn_policies`` list of
        ``policy.RuleDefault`` objects (not a copy).
    """
    return net_l3vpn_policies
| 32.64186
| 79
| 0.674551
| 869
| 7,018
| 5.225547
| 0.174914
| 0.126844
| 0.161859
| 0.184981
| 0.757322
| 0.757322
| 0.749615
| 0.749615
| 0.749615
| 0.749615
| 0
| 0.014804
| 0.22998
| 7,018
| 214
| 80
| 32.794393
| 0.8255
| 0.181961
| 0
| 0.767956
| 0
| 0
| 0.266188
| 0.164858
| 0
| 0
| 0
| 0.004673
| 0
| 1
| 0.005525
| false
| 0
| 0.01105
| 0.005525
| 0.022099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fad38150fbaceff59219291857b8a0d9d0bbccd0
| 107
|
py
|
Python
|
moto/kms/utils.py
|
alexsult/moto
|
ed861ecae1039a048a6350a4ff832ef094cdf2c2
|
[
"Apache-2.0"
] | 2
|
2019-07-10T14:44:12.000Z
|
2020-06-08T17:26:29.000Z
|
moto/kms/utils.py
|
alexsult/moto
|
ed861ecae1039a048a6350a4ff832ef094cdf2c2
|
[
"Apache-2.0"
] | 5
|
2018-04-25T21:04:20.000Z
|
2018-11-02T19:59:27.000Z
|
moto/kms/utils.py
|
alexsult/moto
|
ed861ecae1039a048a6350a4ff832ef094cdf2c2
|
[
"Apache-2.0"
] | 12
|
2017-09-06T22:11:15.000Z
|
2021-05-28T17:22:31.000Z
|
from __future__ import unicode_literals
import uuid
def generate_key_id():
    """Generate a fresh KMS key id.

    Returns
    -------
    str
        The canonical string form of a random (version 4) UUID,
        e.g. ``'3f2c1d7e-...'``.
    """
    new_id = uuid.uuid4()
    return str(new_id)
| 13.375
| 39
| 0.766355
| 15
| 107
| 5
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.158879
| 107
| 7
| 40
| 15.285714
| 0.822222
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
35562806951685a55d5a7c7552c48e3aa7b5ca00
| 5,913
|
py
|
Python
|
tests/test_todo_parser.py
|
thejoeejoee/todo-to-issue-action
|
65d59d6b32c85cabd1b099ae59268d71da804d00
|
[
"MIT"
] | null | null | null |
tests/test_todo_parser.py
|
thejoeejoee/todo-to-issue-action
|
65d59d6b32c85cabd1b099ae59268d71da804d00
|
[
"MIT"
] | null | null | null |
tests/test_todo_parser.py
|
thejoeejoee/todo-to-issue-action
|
65d59d6b32c85cabd1b099ae59268d71da804d00
|
[
"MIT"
] | null | null | null |
import os
import unittest
import json
from main import TodoParser
def count_issues_for_file_type(raw_issues, file_type):
    """Count the issues in *raw_issues* whose markdown language matches *file_type*."""
    return sum(1 for issue in raw_issues if issue.markdown_language == file_type)
class NewIssueTests(unittest.TestCase):
    """Check for newly added TODOs across the files specified in tests/test_new.diff."""

    def setUp(self):
        # Parse the diff once per test method.  Both files are opened with
        # context managers so the handles are closed deterministically —
        # the original left the diff file handle open (resource leak).
        parser = TodoParser()
        with open('syntax.json', 'r') as syntax_json:
            parser.syntax_dict = json.load(syntax_json)
        with open('tests/test_new.diff', 'r') as diff_file:
            self.raw_issues = parser.parse(diff_file)

    def test_python_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 4)

    def test_yaml_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'yaml'), 2)

    def test_php_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'php'), 4)

    def test_java_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'java'), 2)

    def test_ruby_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'ruby'), 3)

    def test_abap_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'abap'), 2)

    def test_sql_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'sql'), 1)

    def test_tex_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'tex'), 2)

    def test_julia_issues(self):
        # TODO: Fix Julia markers
        # The Julia tests are currently failing as @qwinters noticed in #96.
        # It looks to be counting block comments twice as the line marker appears within the block marker.
        # labels: bug
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'julia'), 2)

    def test_autohotkey_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'autohotkey'), 1)

    def test_handlebars_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'handlebars'), 2)

    def test_org_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'text'), 2)

    def test_scss_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'scss'), 2)
class ClosedIssueTests(unittest.TestCase):
    """Check for removed TODOs across the files specified."""

    def setUp(self):
        """Parse tests/test_closed.diff into self.raw_issues for each test."""
        parser = TodoParser()
        with open('syntax.json', 'r') as syntax_json:
            parser.syntax_dict = json.load(syntax_json)
        # Context manager closes the diff handle deterministically; the
        # original bare open() leaked the file object (ResourceWarning).
        with open('tests/test_closed.diff', 'r') as diff_file:
            self.raw_issues = parser.parse(diff_file)

    def test_python_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 4)

    def test_yaml_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'yaml'), 2)

    def test_php_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'php'), 4)

    def test_java_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'java'), 2)

    def test_ruby_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'ruby'), 3)

    def test_abap_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'abap'), 2)

    def test_sql_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'sql'), 1)

    def test_tex_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'tex'), 2)

    def test_julia_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'julia'), 2)

    def test_autohotkey_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'autohotkey'), 1)

    def test_handlebars_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'handlebars'), 2)

    def test_org_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'text'), 2)

    def test_scss_issues(self):
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'scss'), 2)
class IgnorePatternTests(unittest.TestCase):
    """Check that files matching INPUT_IGNORE patterns are excluded."""

    def _parse_with_ignore(self, ignore_pattern):
        """Parse tests/test_closed.diff with INPUT_IGNORE set to *ignore_pattern*.

        The environment variable is set before TodoParser() is constructed
        (the parser reads it at construction time) and is restored to ''
        via addCleanup, so it is reset even when an assertion fails — the
        original trailing reset was skipped on failure, leaking env state
        into other tests.
        """
        os.environ['INPUT_IGNORE'] = ignore_pattern
        self.addCleanup(os.environ.__setitem__, 'INPUT_IGNORE', '')
        parser = TodoParser()
        with open('syntax.json', 'r') as syntax_json:
            parser.syntax_dict = json.load(syntax_json)
        # Context manager closes the diff handle (the original leaked it).
        with open('tests/test_closed.diff', 'r') as diff_file:
            return parser.parse(diff_file)

    def test_single_ignore(self):
        self.raw_issues = self._parse_with_ignore('.*\\.java')
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 2)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'yaml'), 2)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'php'), 4)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'java'), 0)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'ruby'), 3)

    def test_multiple_ignores(self):
        self.raw_issues = self._parse_with_ignore('.*\\.java, tests/example-file\\.php')
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 2)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'yaml'), 2)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'php'), 0)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'java'), 0)
        self.assertEqual(count_issues_for_file_type(self.raw_issues, 'ruby'), 3)
| 40.5
| 107
| 0.702351
| 831
| 5,913
| 4.66065
| 0.12154
| 0.097599
| 0.134263
| 0.17196
| 0.866512
| 0.860831
| 0.846372
| 0.846372
| 0.844823
| 0.844823
| 0
| 0.008304
| 0.185354
| 5,913
| 145
| 108
| 40.77931
| 0.795723
| 0.052258
| 0
| 0.815534
| 0
| 0
| 0.071301
| 0.016083
| 0
| 0
| 0
| 0.006897
| 0.349515
| 1
| 0.300971
| false
| 0
| 0.038835
| 0
| 0.378641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
356e41e008ab0a304ce0de791460204239ff200a
| 237
|
py
|
Python
|
extern/smplx_kinect/smplx_kinect/common/__init__.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | 1
|
2022-02-07T06:12:26.000Z
|
2022-02-07T06:12:26.000Z
|
extern/smplx_kinect/smplx_kinect/common/__init__.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
extern/smplx_kinect/smplx_kinect/common/__init__.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
import smplx_kinect.common.angle_representation
import smplx_kinect.common.body_models
import smplx_kinect.common.concater
import smplx_kinect.common.exp_bm_wrapper
import smplx_kinect.common.metrics
import smplx_kinect.common.smplx_vis
| 33.857143
| 47
| 0.898734
| 35
| 237
| 5.771429
| 0.4
| 0.326733
| 0.504951
| 0.683168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 237
| 6
| 48
| 39.5
| 0.897778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
35ac88a70a91e8fc54d72a0c0007d8538e83a97c
| 161
|
py
|
Python
|
demo/example/foo/admin.py
|
Nuurek/django-plans
|
97976521fa139a907aa13b66ab2b49d5c36948d2
|
[
"MIT"
] | 3
|
2018-02-26T10:56:28.000Z
|
2021-04-01T15:11:19.000Z
|
demo/example/foo/admin.py
|
Nuurek/django-plans
|
97976521fa139a907aa13b66ab2b49d5c36948d2
|
[
"MIT"
] | null | null | null |
demo/example/foo/admin.py
|
Nuurek/django-plans
|
97976521fa139a907aa13b66ab2b49d5c36948d2
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Foo, Company, Profile
# Expose the demo app's models in the Django admin with the default
# ModelAdmin (no custom admin options are needed for these models).
admin.site.register(Profile)
admin.site.register(Company)
admin.site.register(Foo)
| 17.888889
| 41
| 0.801242
| 23
| 161
| 5.608696
| 0.478261
| 0.209302
| 0.395349
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099379
| 161
| 8
| 42
| 20.125
| 0.889655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
35dc2a1bcd84afc2de81584378d63a7812b45c68
| 25,995
|
py
|
Python
|
sdk/python/pulumi_vault/aws/secret_backend.py
|
pulumi/pulumi-vault
|
1682875f4a5d7d508f36e166529ad2b8aec34090
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2019-10-07T17:44:18.000Z
|
2022-03-30T20:46:33.000Z
|
sdk/python/pulumi_vault/aws/secret_backend.py
|
pulumi/pulumi-vault
|
1682875f4a5d7d508f36e166529ad2b8aec34090
|
[
"ECL-2.0",
"Apache-2.0"
] | 79
|
2019-10-11T18:13:07.000Z
|
2022-03-31T21:09:41.000Z
|
sdk/python/pulumi_vault/aws/secret_backend.py
|
pulumi/pulumi-vault
|
1682875f4a5d7d508f36e166529ad2b8aec34090
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-10-28T10:08:40.000Z
|
2020-03-17T14:20:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecretBackendArgs', 'SecretBackend']
@pulumi.input_type
class SecretBackendArgs:
    """Constructor arguments for a :class:`SecretBackend` resource.

    All fields are optional Pulumi inputs; only values that were explicitly
    supplied are recorded on the instance.

    NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    do not edit it by hand (see the file header).
    """
    def __init__(__self__, *,
                 access_key: Optional[pulumi.Input[str]] = None,
                 default_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 iam_endpoint: Optional[pulumi.Input[str]] = None,
                 max_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 secret_key: Optional[pulumi.Input[str]] = None,
                 sts_endpoint: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a SecretBackend resource.
        :param pulumi.Input[str] access_key: The AWS Access Key ID this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[int] default_lease_ttl_seconds: The default TTL for credentials
               issued by this backend.
        :param pulumi.Input[str] description: A human-friendly description for this backend.
        :param pulumi.Input[str] iam_endpoint: Specifies a custom HTTP IAM endpoint to use.
        :param pulumi.Input[int] max_lease_ttl_seconds: The maximum TTL that can be requested
               for credentials issued by this backend.
        :param pulumi.Input[str] path: The unique path this backend should be mounted at. Must
               not begin or end with a `/`. Defaults to `aws`.
        :param pulumi.Input[str] region: The AWS region for API calls. Defaults to `us-east-1`.
        :param pulumi.Input[str] secret_key: The AWS Secret Key this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[str] sts_endpoint: Specifies a custom HTTP STS endpoint to use.
        """
        # Record only arguments the caller actually provided; pulumi.set
        # registers each one as an input property on this args object.
        if access_key is not None:
            pulumi.set(__self__, "access_key", access_key)
        if default_lease_ttl_seconds is not None:
            pulumi.set(__self__, "default_lease_ttl_seconds", default_lease_ttl_seconds)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if iam_endpoint is not None:
            pulumi.set(__self__, "iam_endpoint", iam_endpoint)
        if max_lease_ttl_seconds is not None:
            pulumi.set(__self__, "max_lease_ttl_seconds", max_lease_ttl_seconds)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if secret_key is not None:
            pulumi.set(__self__, "secret_key", secret_key)
        if sts_endpoint is not None:
            pulumi.set(__self__, "sts_endpoint", sts_endpoint)
    # Each property below delegates storage to pulumi.get/pulumi.set, which
    # the @pulumi.input_type machinery maps to the engine-facing names given
    # in @pulumi.getter (e.g. "accessKey").
    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS Access Key ID this backend should use to
        issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        """
        return pulumi.get(self, "access_key")
    @access_key.setter
    def access_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_key", value)
    @property
    @pulumi.getter(name="defaultLeaseTtlSeconds")
    def default_lease_ttl_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The default TTL for credentials
        issued by this backend.
        """
        return pulumi.get(self, "default_lease_ttl_seconds")
    @default_lease_ttl_seconds.setter
    def default_lease_ttl_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "default_lease_ttl_seconds", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A human-friendly description for this backend.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="iamEndpoint")
    def iam_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a custom HTTP IAM endpoint to use.
        """
        return pulumi.get(self, "iam_endpoint")
    @iam_endpoint.setter
    def iam_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "iam_endpoint", value)
    @property
    @pulumi.getter(name="maxLeaseTtlSeconds")
    def max_lease_ttl_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum TTL that can be requested
        for credentials issued by this backend.
        """
        return pulumi.get(self, "max_lease_ttl_seconds")
    @max_lease_ttl_seconds.setter
    def max_lease_ttl_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_lease_ttl_seconds", value)
    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        The unique path this backend should be mounted at. Must
        not begin or end with a `/`. Defaults to `aws`.
        """
        return pulumi.get(self, "path")
    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS region for API calls. Defaults to `us-east-1`.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS Secret Key this backend should use to
        issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        """
        return pulumi.get(self, "secret_key")
    @secret_key.setter
    def secret_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_key", value)
    @property
    @pulumi.getter(name="stsEndpoint")
    def sts_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a custom HTTP STS endpoint to use.
        """
        return pulumi.get(self, "sts_endpoint")
    @sts_endpoint.setter
    def sts_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sts_endpoint", value)
@pulumi.input_type
class _SecretBackendState:
    """State properties used for looking up and filtering SecretBackend
    resources (see :meth:`SecretBackend.get`).

    Mirrors :class:`SecretBackendArgs` field-for-field; kept separate because
    Pulumi distinguishes constructor inputs from lookup state.

    NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    do not edit it by hand (see the file header).
    """
    def __init__(__self__, *,
                 access_key: Optional[pulumi.Input[str]] = None,
                 default_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 iam_endpoint: Optional[pulumi.Input[str]] = None,
                 max_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 secret_key: Optional[pulumi.Input[str]] = None,
                 sts_endpoint: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering SecretBackend resources.
        :param pulumi.Input[str] access_key: The AWS Access Key ID this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[int] default_lease_ttl_seconds: The default TTL for credentials
               issued by this backend.
        :param pulumi.Input[str] description: A human-friendly description for this backend.
        :param pulumi.Input[str] iam_endpoint: Specifies a custom HTTP IAM endpoint to use.
        :param pulumi.Input[int] max_lease_ttl_seconds: The maximum TTL that can be requested
               for credentials issued by this backend.
        :param pulumi.Input[str] path: The unique path this backend should be mounted at. Must
               not begin or end with a `/`. Defaults to `aws`.
        :param pulumi.Input[str] region: The AWS region for API calls. Defaults to `us-east-1`.
        :param pulumi.Input[str] secret_key: The AWS Secret Key this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[str] sts_endpoint: Specifies a custom HTTP STS endpoint to use.
        """
        # Record only values the caller actually provided.
        if access_key is not None:
            pulumi.set(__self__, "access_key", access_key)
        if default_lease_ttl_seconds is not None:
            pulumi.set(__self__, "default_lease_ttl_seconds", default_lease_ttl_seconds)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if iam_endpoint is not None:
            pulumi.set(__self__, "iam_endpoint", iam_endpoint)
        if max_lease_ttl_seconds is not None:
            pulumi.set(__self__, "max_lease_ttl_seconds", max_lease_ttl_seconds)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if secret_key is not None:
            pulumi.set(__self__, "secret_key", secret_key)
        if sts_endpoint is not None:
            pulumi.set(__self__, "sts_endpoint", sts_endpoint)
    # Accessors delegate to pulumi.get/pulumi.set; @pulumi.getter supplies
    # the engine-facing camelCase property name where it differs.
    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS Access Key ID this backend should use to
        issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        """
        return pulumi.get(self, "access_key")
    @access_key.setter
    def access_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_key", value)
    @property
    @pulumi.getter(name="defaultLeaseTtlSeconds")
    def default_lease_ttl_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The default TTL for credentials
        issued by this backend.
        """
        return pulumi.get(self, "default_lease_ttl_seconds")
    @default_lease_ttl_seconds.setter
    def default_lease_ttl_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "default_lease_ttl_seconds", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A human-friendly description for this backend.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="iamEndpoint")
    def iam_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a custom HTTP IAM endpoint to use.
        """
        return pulumi.get(self, "iam_endpoint")
    @iam_endpoint.setter
    def iam_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "iam_endpoint", value)
    @property
    @pulumi.getter(name="maxLeaseTtlSeconds")
    def max_lease_ttl_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum TTL that can be requested
        for credentials issued by this backend.
        """
        return pulumi.get(self, "max_lease_ttl_seconds")
    @max_lease_ttl_seconds.setter
    def max_lease_ttl_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_lease_ttl_seconds", value)
    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        The unique path this backend should be mounted at. Must
        not begin or end with a `/`. Defaults to `aws`.
        """
        return pulumi.get(self, "path")
    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS region for API calls. Defaults to `us-east-1`.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> Optional[pulumi.Input[str]]:
        """
        The AWS Secret Key this backend should use to
        issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        """
        return pulumi.get(self, "secret_key")
    @secret_key.setter
    def secret_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_key", value)
    @property
    @pulumi.getter(name="stsEndpoint")
    def sts_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies a custom HTTP STS endpoint to use.
        """
        return pulumi.get(self, "sts_endpoint")
    @sts_endpoint.setter
    def sts_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sts_endpoint", value)
class SecretBackend(pulumi.CustomResource):
    """Pulumi custom resource for the ``vault:aws/secretBackend:SecretBackend``
    resource type (an AWS secret backend mounted in Vault).

    NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    do not edit it by hand (see the file header).
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_key: Optional[pulumi.Input[str]] = None,
                 default_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 iam_endpoint: Optional[pulumi.Input[str]] = None,
                 max_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 secret_key: Optional[pulumi.Input[str]] = None,
                 sts_endpoint: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a SecretBackend resource from individual keyword arguments.
        ## Import
        AWS secret backends can be imported using the `path`, e.g.
        ```sh
        $ pulumi import vault:aws/secretBackend:SecretBackend aws aws
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_key: The AWS Access Key ID this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[int] default_lease_ttl_seconds: The default TTL for credentials
               issued by this backend.
        :param pulumi.Input[str] description: A human-friendly description for this backend.
        :param pulumi.Input[str] iam_endpoint: Specifies a custom HTTP IAM endpoint to use.
        :param pulumi.Input[int] max_lease_ttl_seconds: The maximum TTL that can be requested
               for credentials issued by this backend.
        :param pulumi.Input[str] path: The unique path this backend should be mounted at. Must
               not begin or end with a `/`. Defaults to `aws`.
        :param pulumi.Input[str] region: The AWS region for API calls. Defaults to `us-east-1`.
        :param pulumi.Input[str] secret_key: The AWS Secret Key this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[str] sts_endpoint: Specifies a custom HTTP STS endpoint to use.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[SecretBackendArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a SecretBackend resource from a SecretBackendArgs object.
        ## Import
        AWS secret backends can be imported using the `path`, e.g.
        ```sh
        $ pulumi import vault:aws/secretBackend:SecretBackend aws aws
        ```
        :param str resource_name: The name of the resource.
        :param SecretBackendArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a SecretBackendArgs
        # object or raw keyword values, resolved by the shared utility helper.
        resource_args, opts = _utilities.get_resource_args_opts(SecretBackendArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_key: Optional[pulumi.Input[str]] = None,
                 default_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 iam_endpoint: Optional[pulumi.Input[str]] = None,
                 max_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 secret_key: Optional[pulumi.Input[str]] = None,
                 sts_endpoint: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Common construction path shared by both public __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: callers may not pass a prebuilt
            # __props__ (that is only valid when looking up by id).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses SecretBackendArgs.__init__ so unset fields stay absent.
            __props__ = SecretBackendArgs.__new__(SecretBackendArgs)
            __props__.__dict__["access_key"] = access_key
            __props__.__dict__["default_lease_ttl_seconds"] = default_lease_ttl_seconds
            __props__.__dict__["description"] = description
            __props__.__dict__["iam_endpoint"] = iam_endpoint
            __props__.__dict__["max_lease_ttl_seconds"] = max_lease_ttl_seconds
            __props__.__dict__["path"] = path
            __props__.__dict__["region"] = region
            __props__.__dict__["secret_key"] = secret_key
            __props__.__dict__["sts_endpoint"] = sts_endpoint
        super(SecretBackend, __self__).__init__(
            'vault:aws/secretBackend:SecretBackend',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            access_key: Optional[pulumi.Input[str]] = None,
            default_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
            description: Optional[pulumi.Input[str]] = None,
            iam_endpoint: Optional[pulumi.Input[str]] = None,
            max_lease_ttl_seconds: Optional[pulumi.Input[int]] = None,
            path: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None,
            secret_key: Optional[pulumi.Input[str]] = None,
            sts_endpoint: Optional[pulumi.Input[str]] = None) -> 'SecretBackend':
        """
        Get an existing SecretBackend resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_key: The AWS Access Key ID this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[int] default_lease_ttl_seconds: The default TTL for credentials
               issued by this backend.
        :param pulumi.Input[str] description: A human-friendly description for this backend.
        :param pulumi.Input[str] iam_endpoint: Specifies a custom HTTP IAM endpoint to use.
        :param pulumi.Input[int] max_lease_ttl_seconds: The maximum TTL that can be requested
               for credentials issued by this backend.
        :param pulumi.Input[str] path: The unique path this backend should be mounted at. Must
               not begin or end with a `/`. Defaults to `aws`.
        :param pulumi.Input[str] region: The AWS region for API calls. Defaults to `us-east-1`.
        :param pulumi.Input[str] secret_key: The AWS Secret Key this backend should use to
               issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        :param pulumi.Input[str] sts_endpoint: Specifies a custom HTTP STS endpoint to use.
        """
        # Looking up by id: attach the id to opts, then build a state object
        # whose fields further qualify the lookup.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _SecretBackendState.__new__(_SecretBackendState)
        __props__.__dict__["access_key"] = access_key
        __props__.__dict__["default_lease_ttl_seconds"] = default_lease_ttl_seconds
        __props__.__dict__["description"] = description
        __props__.__dict__["iam_endpoint"] = iam_endpoint
        __props__.__dict__["max_lease_ttl_seconds"] = max_lease_ttl_seconds
        __props__.__dict__["path"] = path
        __props__.__dict__["region"] = region
        __props__.__dict__["secret_key"] = secret_key
        __props__.__dict__["sts_endpoint"] = sts_endpoint
        return SecretBackend(resource_name, opts=opts, __props__=__props__)
    # Output properties: read-only views of the resource's live state.
    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> pulumi.Output[Optional[str]]:
        """
        The AWS Access Key ID this backend should use to
        issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        """
        return pulumi.get(self, "access_key")
    @property
    @pulumi.getter(name="defaultLeaseTtlSeconds")
    def default_lease_ttl_seconds(self) -> pulumi.Output[int]:
        """
        The default TTL for credentials
        issued by this backend.
        """
        return pulumi.get(self, "default_lease_ttl_seconds")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A human-friendly description for this backend.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="iamEndpoint")
    def iam_endpoint(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies a custom HTTP IAM endpoint to use.
        """
        return pulumi.get(self, "iam_endpoint")
    @property
    @pulumi.getter(name="maxLeaseTtlSeconds")
    def max_lease_ttl_seconds(self) -> pulumi.Output[int]:
        """
        The maximum TTL that can be requested
        for credentials issued by this backend.
        """
        return pulumi.get(self, "max_lease_ttl_seconds")
    @property
    @pulumi.getter
    def path(self) -> pulumi.Output[Optional[str]]:
        """
        The unique path this backend should be mounted at. Must
        not begin or end with a `/`. Defaults to `aws`.
        """
        return pulumi.get(self, "path")
    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        The AWS region for API calls. Defaults to `us-east-1`.
        """
        return pulumi.get(self, "region")
    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> pulumi.Output[Optional[str]]:
        """
        The AWS Secret Key this backend should use to
        issue new credentials. Vault uses the official AWS SDK to authenticate, and thus can also use standard AWS environment credentials, shared file credentials or IAM role/ECS task credentials.
        """
        return pulumi.get(self, "secret_key")
    @property
    @pulumi.getter(name="stsEndpoint")
    def sts_endpoint(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies a custom HTTP STS endpoint to use.
        """
        return pulumi.get(self, "sts_endpoint")
| 45.525394
| 204
| 0.653203
| 3,222
| 25,995
| 5.061763
| 0.057728
| 0.081611
| 0.079833
| 0.084984
| 0.901895
| 0.892145
| 0.887485
| 0.879821
| 0.87277
| 0.861733
| 0
| 0.000412
| 0.252933
| 25,995
| 570
| 205
| 45.605263
| 0.839392
| 0.352375
| 0
| 0.845912
| 1
| 0
| 0.0959
| 0.033591
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163522
| false
| 0.003145
| 0.015723
| 0
| 0.27673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ea289fc21be2bf2347b4d418f593ab99851592fb
| 92
|
py
|
Python
|
scrutiny/__init__.py
|
ardinor/scrutiny
|
3769e9db380b06ff45b0106dd5c4d727ecb2cb2f
|
[
"MIT"
] | null | null | null |
scrutiny/__init__.py
|
ardinor/scrutiny
|
3769e9db380b06ff45b0106dd5c4d727ecb2cb2f
|
[
"MIT"
] | null | null | null |
scrutiny/__init__.py
|
ardinor/scrutiny
|
3769e9db380b06ff45b0106dd5c4d727ecb2cb2f
|
[
"MIT"
] | null | null | null |
from scrutiny.scrutiny import *
from scrutiny.models import *
from scrutiny.tests import *
| 18.4
| 31
| 0.793478
| 12
| 92
| 6.083333
| 0.416667
| 0.493151
| 0.493151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 92
| 4
| 32
| 23
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ea2ee794aa3e8622c79ea37fa5613640077da648
| 28,021
|
py
|
Python
|
isi_sdk/apis/filepool_api.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/apis/filepool_api.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/apis/filepool_api.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
FilepoolApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FilepoolApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Prefer an explicitly supplied client; otherwise reuse (or lazily
        # create) the client held by the shared Configuration object.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def _invoke(self, method_name, http_method, resource_path, params,
                required_params, path_param_names, body_param_name,
                response_type, kwargs):
        """
        Shared request pipeline used by every endpoint method of this API.

        :param str method_name: public method name, used in error messages.
        :param str http_method: HTTP verb ('GET', 'POST', 'PUT', 'DELETE').
        :param str resource_path: endpoint path template.
        :param dict params: declared parameter name -> value, as passed by
            the caller of the public method.
        :param list required_params: names in `params` that must not be None.
        :param dict path_param_names: path placeholder -> parameter name used
            to fill it (e.g. {'FilepoolPolicyId': 'filepool_policy_id'}).
        :param str body_param_name: name of the parameter sent as the request
            body, or None when the endpoint takes no body.
        :param str response_type: swagger model name to deserialize the
            response into, or None.
        :param dict kwargs: extra keyword arguments from the caller; only
            'callback' is accepted.
        :return: result of api_client.call_api (the response object, or the
            request thread when a `callback` was supplied).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when a required parameter is None.
        """
        # Reject any keyword argument other than the declared parameters
        # and the optional async `callback`.
        allowed = set(params)
        allowed.add('callback')
        for key in kwargs:
            if key not in allowed:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
        # Verify every required parameter is set.
        for name in required_params:
            if params.get(name) is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling `%s`"
                    % (name, method_name))
        resource_path = resource_path.replace('{format}', 'json')
        path_params = {}
        for placeholder, name in path_param_names.items():
            if params.get(name) is not None:
                path_params[placeholder] = params[name]
        body_params = None
        if body_param_name is not None:
            body_params = params.get(body_param_name)
        header_params = {}
        # HTTP header `Accept`; dropped entirely when the client selects none.
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['basic_auth']
        return self.api_client.call_api(resource_path, http_method,
                                        path_params,
                                        {},
                                        header_params,
                                        body=body_params,
                                        post_params=[],
                                        files={},
                                        response_type=response_type,
                                        auth_settings=auth_settings,
                                        callback=kwargs.get('callback'))

    def create_filepool_policy(self, filepool_policy, **kwargs):
        """
        Create a new policy.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param FilepoolPolicyCreateParams filepool_policy: (required)
        :return: CreateFilepoolPolicyResponse
        """
        return self._invoke(
            'create_filepool_policy', 'POST',
            '/platform/1/filepool/policies',
            {'filepool_policy': filepool_policy},
            ['filepool_policy'], {}, 'filepool_policy',
            'CreateFilepoolPolicyResponse', kwargs)

    def delete_filepool_policy(self, filepool_policy_id, **kwargs):
        """
        Delete file pool policy.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str filepool_policy_id: Delete file pool policy. (required)
        :return: None
        """
        return self._invoke(
            'delete_filepool_policy', 'DELETE',
            '/platform/1/filepool/policies/{FilepoolPolicyId}',
            {'filepool_policy_id': filepool_policy_id},
            ['filepool_policy_id'],
            {'FilepoolPolicyId': 'filepool_policy_id'}, None,
            None, kwargs)

    def get_filepool_default_policy(self, **kwargs):
        """
        List default file pool policy.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: FilepoolDefaultPolicy
        """
        return self._invoke(
            'get_filepool_default_policy', 'GET',
            '/platform/1/filepool/default-policy',
            {}, [], {}, None,
            'FilepoolDefaultPolicy', kwargs)

    def get_filepool_policy(self, filepool_policy_id, **kwargs):
        """
        Retrieve file pool policy information.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str filepool_policy_id: Retrieve file pool policy information. (required)
        :return: FilepoolPolicies
        """
        return self._invoke(
            'get_filepool_policy', 'GET',
            '/platform/1/filepool/policies/{FilepoolPolicyId}',
            {'filepool_policy_id': filepool_policy_id},
            ['filepool_policy_id'],
            {'FilepoolPolicyId': 'filepool_policy_id'}, None,
            'FilepoolPolicies', kwargs)

    def get_filepool_template(self, filepool_template_id, **kwargs):
        """
        List all templates.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str filepool_template_id: List all templates. (required)
        :return: FilepoolTemplates
        """
        return self._invoke(
            'get_filepool_template', 'GET',
            '/platform/1/filepool/templates/{FilepoolTemplateId}',
            {'filepool_template_id': filepool_template_id},
            ['filepool_template_id'],
            {'FilepoolTemplateId': 'filepool_template_id'}, None,
            'FilepoolTemplates', kwargs)

    def get_filepool_templates(self, **kwargs):
        """
        List all templates.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: FilepoolTemplates
        """
        return self._invoke(
            'get_filepool_templates', 'GET',
            '/platform/1/filepool/templates',
            {}, [], {}, None,
            'FilepoolTemplates', kwargs)

    def list_filepool_policies(self, **kwargs):
        """
        List all policies.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: FilepoolPoliciesExtended
        """
        return self._invoke(
            'list_filepool_policies', 'GET',
            '/platform/1/filepool/policies',
            {}, [], {}, None,
            'FilepoolPoliciesExtended', kwargs)

    def update_filepool_default_policy(self, filepool_default_policy, **kwargs):
        """
        Set default file pool policy.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param FilepoolDefaultPolicyExtended filepool_default_policy: (required)
        :return: None
        """
        return self._invoke(
            'update_filepool_default_policy', 'PUT',
            '/platform/1/filepool/default-policy',
            {'filepool_default_policy': filepool_default_policy},
            ['filepool_default_policy'], {}, 'filepool_default_policy',
            None, kwargs)

    def update_filepool_policy(self, filepool_policy, filepool_policy_id, **kwargs):
        """
        Modify file pool policy. All input fields are optional, but one or more must be supplied.
        Synchronous by default; pass a `callback` keyword argument to make
        an asynchronous request (the request thread is returned instead).

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param FilepoolPolicy filepool_policy: (required)
        :param str filepool_policy_id: Modify file pool policy. All input fields are optional, but one or more must be supplied. (required)
        :return: None
        """
        return self._invoke(
            'update_filepool_policy', 'PUT',
            '/platform/1/filepool/policies/{FilepoolPolicyId}',
            {'filepool_policy': filepool_policy,
             'filepool_policy_id': filepool_policy_id},
            ['filepool_policy', 'filepool_policy_id'],
            {'FilepoolPolicyId': 'filepool_policy_id'}, 'filepool_policy',
            None, kwargs)
| 37.968835
| 139
| 0.551336
| 2,670
| 28,021
| 5.568914
| 0.080524
| 0.063084
| 0.026229
| 0.027843
| 0.879279
| 0.856682
| 0.850024
| 0.835967
| 0.835026
| 0.835026
| 0
| 0.001133
| 0.370222
| 28,021
| 737
| 140
| 38.020353
| 0.841446
| 0.265158
| 0
| 0.819629
| 0
| 0
| 0.181239
| 0.049076
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026525
| false
| 0
| 0.018568
| 0
| 0.071618
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea3abb24e745d7ffa490903970d14fcff455fc83
| 5,305
|
py
|
Python
|
tests/slippinj/databases/drivers/test_sqlserver.py
|
scm-spain/slippin-jimmy
|
d0e52277daff523eda63f5d3137b5a990413923d
|
[
"Apache-2.0"
] | 7
|
2016-03-31T06:17:23.000Z
|
2018-01-25T15:25:05.000Z
|
tests/slippinj/databases/drivers/test_sqlserver.py
|
scm-spain/slippin-jimmy
|
d0e52277daff523eda63f5d3137b5a990413923d
|
[
"Apache-2.0"
] | 8
|
2016-03-30T18:45:09.000Z
|
2017-06-19T09:21:35.000Z
|
tests/slippinj/databases/drivers/test_sqlserver.py
|
scm-spain/slippin-jimmy
|
d0e52277daff523eda63f5d3137b5a990413923d
|
[
"Apache-2.0"
] | 13
|
2017-04-21T08:17:14.000Z
|
2019-07-12T04:59:24.000Z
|
import logging
from mock import Mock
from slippinj.databases.drivers.sqlserver import Sqlserver
class TestSqlserver:
    """Unit tests for the Sqlserver driver's table-information gathering."""

    def setup_method(self, method):
        # Quiet logger so the driver's logging does not pollute test output.
        self.logger = logging.getLogger('test')
        self.logger.addHandler(logging.NullHandler())

    def teardown_method(self, method):
        self.logger = None

    @staticmethod
    def _fetchall_cursor(rows):
        # Cursor mock whose fetchall() yields the given rows.
        cursor = Mock()
        cursor.execute = Mock(return_value=True)
        cursor.fetchall = Mock(return_value=rows)
        return cursor

    @staticmethod
    def _count_cursor():
        # Cursor mock for the row-count query: fetchone() -> [10].
        cursor = Mock()
        cursor.execute = Mock(return_value=True)
        cursor.fetchone = Mock(return_value=[10])
        return cursor

    @staticmethod
    def _tables_columns():
        # One column row per table ('unit' then 'test'), mirroring the
        # driver's column-information query result.
        base = {
            'table_name': '',
            'column_name': 'column',
            'data_type': 'string',
            'character_maximum_length': '1',
            'is_nullable': 'NO',
            'column_default': ''
        }
        rows = []
        for table in ('unit', 'test'):
            base.update(table_name=table)
            rows.append(base.copy())
        return rows

    @staticmethod
    def _mocked_builder(cursors):
        # Builder mock: build() returns a connection whose cursor() hands
        # out the given cursors in order (one per query the driver runs).
        connection = Mock()
        connection.cursor = Mock(side_effect=cursors)
        builder = Mock()
        builder.build = Mock(return_value=connection)
        return builder

    @staticmethod
    def _expected_table_info():
        # Expected per-table entry produced from the fixtures above.
        return {'columns': [{'character_maximum_length': '1',
                             'column_default': '',
                             'column_name': 'column',
                             'data_type': 'string',
                             'is_nullable': 'NO'}],
                'count': 10,
                'rows': []}

    def test_get_tables_info_when_no_table_list_is_provided(self):
        # Cursor order: table list, row count, columns, top rows.
        builder = self._mocked_builder([
            self._fetchall_cursor([{'table_name': 'unit'}, {'table_name': 'test'}]),
            self._count_cursor(),
            self._fetchall_cursor(self._tables_columns()),
            self._fetchall_cursor([]),
        ])
        expected = {'tables': {'test': self._expected_table_info(),
                               'unit': self._expected_table_info()},
                    'db_connection_string': 'jdbc:sqlserver://test'
                    }
        assert expected == Sqlserver(builder, self.logger, db_host='test').get_all_tables_info(None, None, None)

    def test_get_tables_info_when_table_list_has_been_provided(self):
        # No table-list query here: cursor order is count, columns, top rows.
        builder = self._mocked_builder([
            self._count_cursor(),
            self._fetchall_cursor(self._tables_columns()),
            self._fetchall_cursor([]),
        ])
        expected = {'tables': {'unit': self._expected_table_info()},
                    'db_connection_string': 'jdbc:sqlserver://test'
                    }
        assert expected == Sqlserver(builder, self.logger, db_host='test').get_all_tables_info('unit', None, None)
| 46.130435
| 123
| 0.556456
| 514
| 5,305
| 5.315175
| 0.142023
| 0.112738
| 0.087848
| 0.061493
| 0.897145
| 0.854685
| 0.84224
| 0.821742
| 0.821742
| 0.804173
| 0
| 0.004292
| 0.341188
| 5,305
| 114
| 124
| 46.535088
| 0.777396
| 0
| 0
| 0.739583
| 0
| 0
| 0.12328
| 0.030537
| 0
| 0
| 0
| 0
| 0.020833
| 1
| 0.041667
| false
| 0
| 0.03125
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
575ef0b42161bd74747e94dde836117ddebee906
| 176,855
|
py
|
Python
|
symFile.py
|
shurkova/currentVers
|
25027f3f4faa9033b69041459f0785c1436c3f31
|
[
"CECILL-B"
] | 3
|
2020-06-22T15:02:23.000Z
|
2021-05-05T14:03:25.000Z
|
symFile.py
|
shurkova/currentVers
|
25027f3f4faa9033b69041459f0785c1436c3f31
|
[
"CECILL-B"
] | null | null | null |
symFile.py
|
shurkova/currentVers
|
25027f3f4faa9033b69041459f0785c1436c3f31
|
[
"CECILL-B"
] | 11
|
2020-05-01T09:03:14.000Z
|
2022-02-09T14:17:41.000Z
|
simType='sim_file'
symProps = [
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.703780466095', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.66875', 1, 'Y', 0.66874999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.790476190476', 1, 'XVel', 0.79047619047619044, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.66875', 1, 'YVel', 0.66874999999999996, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.45709788914', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.485714285714', 1, 'X', 0.48571428571428571, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.125', 1, 'Y', 0.125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.485714285714', 1, 'XVel', 0.48571428571428571, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.125', 1, 'YVel', 0.125, 'value']]}], 'name': 'state_0.312755165415', 'analog': 0},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.682315198268', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.53125', 1, 'Y', 0.53125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1375', 1, 'YVel', -0.13749999999999996, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.117847058516', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.519047619048', 1, 'X', 0.51904761904761909, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.16875', 1, 'Y', 0.16875000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333381, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043750000000000011, 'value']]}], 'name': 'state_0.805988837425', 'analog': 1},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.450991429235', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.525', 1, 'Y', 0.52500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062499999999999778, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.228629503255', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.547619047619', 1, 'X', 0.54761904761904767, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.20625', 1, 'Y', 0.20624999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.931435114204', 'analog': 2},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.813031525206', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.525', 1, 'Y', 0.52500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.347015379734', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.580952380952', 1, 'X', 0.580952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.25', 1, 'Y', 0.25, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043750000000000011, 'value']]}], 'name': 'state_0.0998246763924', 'analog': 3},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.321824707421', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3875', 1, 'Y', 0.38750000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1375', 1, 'YVel', -0.13750000000000001, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.68575015763', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.619047619048', 1, 'X', 0.61904761904761907, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3', 1, 'Y', 0.29999999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.049999999999999989, 'value']]}], 'name': 'state_0.913981746386', 'analog': 4},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.933134657413', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.31875', 1, 'Y', 0.31874999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.06875', 1, 'YVel', -0.068750000000000033, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0328524097159', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.647619047619', 1, 'X', 0.64761904761904765, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3375', 1, 'Y', 0.33750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000033, 'value']]}], 'name': 'state_0.569462804023', 'analog': 5},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0833369601241', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1375', 1, 'YVel', -0.13749999999999998, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.76060529193', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.680952380952', 1, 'X', 0.68095238095238098, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.38125', 1, 'Y', 0.38124999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999956, 'value']]}], 'name': 'state_0.25877150862', 'analog': 6},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.334827391427', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.10625', 1, 'Y', 0.10625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.075', 1, 'YVel', -0.074999999999999997, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.140585390861', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.704761904762', 1, 'X', 0.70476190476190481, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4125', 1, 'Y', 0.41249999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.499237953355', 'analog': 7},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.716953916028', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062499999999999917, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.846007203824', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.728571428571', 1, 'X', 0.72857142857142854, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.44375', 1, 'Y', 0.44374999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.610563344638', 'analog': 8},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.572251691779', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.416336715325', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.766666666667', 1, 'X', 0.76666666666666672, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.49375', 1, 'Y', 0.49375000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238182, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.050000000000000044, 'value']]}], 'name': 'state_0.269390562985', 'analog': 9},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.991853825664', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.50292118116', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.795238095238', 1, 'X', 0.79523809523809519, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.53125', 1, 'Y', 0.53125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.02857142857142847, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.422879977216', 'analog': 10},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0617213932038', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.175', 1, 'Y', 0.17499999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.075', 1, 'YVel', 0.074999999999999983, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.343393808819', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.0872759768396', 'analog': 11},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.83792737958', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.00625', 1, 'YVel', 0.0062500000000000056, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.725603079094', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.380433043334', 'analog': 12},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.716380001772', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.08125', 1, 'YVel', -0.081249999999999989, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.378670801612', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.65612181125', 'analog': 13},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.902586206642', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999983, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0796590825069', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.101148492471', 'analog': 14},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.33774197818', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.854875428441', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.973065579554', 'analog': 15},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.783696332506', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.468466883273', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.504761904762', 1, 'X', 0.50476190476190474, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.45', 1, 'Y', 0.45000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.290476190476', 1, 'XVel', -0.29047619047619044, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.08125', 1, 'YVel', -0.081249999999999989, 'value']]}], 'name': 'state_0.713874946261', 'analog': 16},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.790168048441', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.853264401851', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.533333333333', 1, 'X', 0.53333333333333333, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4125', 1, 'Y', 0.41249999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000033, 'value']]}], 'name': 'state_0.0102421174153', 'analog': 17},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.538531396015', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.20625', 1, 'Y', 0.20624999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0625', 1, 'YVel', 0.0625, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.814216360946', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.552380952381', 1, 'X', 0.55238095238095242, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3875', 1, 'Y', 0.38750000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.024999999999999967, 'value']]}], 'name': 'state_0.938816835237', 'analog': 18},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.963285201231', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.21875', 1, 'Y', 0.21875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0125', 1, 'YVel', 0.012500000000000011, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.163515554686', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.580952380952', 1, 'X', 0.580952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35', 1, 'Y', 0.34999999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000033, 'value']]}], 'name': 'state_0.551584889122', 'analog': 19},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.734283416162', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.275', 1, 'Y', 0.27500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.349168588175', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.6', 1, 'X', 0.59999999999999998, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.325', 1, 'Y', 0.32500000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.01904761904761898, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.024999999999999967, 'value']]}], 'name': 'state_0.609972294527', 'analog': 20},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.982170541635', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.225', 1, 'Y', 0.22500000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.050000000000000017, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.611118863365', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.628571428571', 1, 'X', 0.62857142857142856, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000033, 'value']]}], 'name': 'state_0.953636800645', 'analog': 21},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.474749941466', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.125', 1, 'YVel', -0.125, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.525348309716', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.657142857143', 1, 'X', 0.65714285714285714, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.25', 1, 'Y', 0.25, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.912493262316', 'analog': 22},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.317092435523', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999983, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.533016197981', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.685714285714', 1, 'X', 0.68571428571428572, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2125', 1, 'Y', 0.21249999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000006, 'value']]}], 'name': 'state_0.920340052655', 'analog': 23},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0242969415528', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.075626924385', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.714285714286', 1, 'X', 0.7142857142857143, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.175', 1, 'Y', 0.17499999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000006, 'value']]}], 'name': 'state_0.768797606073', 'analog': 24},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.873552177434', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.424488059332', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.747619047619', 1, 'X', 0.74761904761904763, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.13125', 1, 'Y', 0.13125000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.04375', 1, 'YVel', -0.043749999999999983, 'value']]}], 'name': 'state_0.760892693994', 'analog': 25},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.03338342713', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.484337643913', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.771428571429', 1, 'X', 0.77142857142857146, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.08125', 1, 'Y', 0.081250000000000003, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.050000000000000003, 'value']]}], 'name': 'state_0.171331885574', 'analog': 26},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.275231897672', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.919107711826', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.738095238095', 1, 'X', 0.73809523809523814, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.075', 1, 'Y', 0.074999999999999997, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062500000000000056, 'value']]}], 'name': 'state_0.33801131865', 'analog': 27},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.540614796748', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.25160840835', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.719047619048', 1, 'X', 0.71904761904761905, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1125', 1, 'Y', 0.1125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000006, 'value']]}], 'name': 'state_0.0572920418002', 'analog': 28},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.868507822939', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.560986313113', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.7', 1, 'X', 0.69999999999999996, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.15', 1, 'Y', 0.14999999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999992, 'value']]}], 'name': 'state_0.761068154432', 'analog': 29},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.368599304491', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.176431209773', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.671428571429', 1, 'X', 0.67142857142857137, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.20625', 1, 'Y', 0.20624999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056249999999999994, 'value']]}], 'name': 'state_0.333315635151', 'analog': 30},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.630239200564', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.645944556734', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.642857142857', 1, 'X', 0.6428571428571429, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.02857142857142847, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}], 'name': 'state_0.799774305242', 'analog': 31},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.491584739446', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.04375', 1, 'YVel', -0.043749999999999983, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.227734496155', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.62380952381', 1, 'X', 0.62380952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3', 1, 'Y', 0.29999999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.228666140286', 'analog': 32},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.597505719606', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.804218403641', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.604761904762', 1, 'X', 0.60476190476190472, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3375', 1, 'Y', 0.33750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000033, 'value']]}], 'name': 'state_0.581275064199', 'analog': 33},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.183417259221', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.178312104711', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.580952380952', 1, 'X', 0.580952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.38125', 1, 'Y', 0.38124999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0238095238095', 1, 'XVel', -0.023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999956, 'value']]}], 'name': 'state_0.472107773565', 'analog': 34},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.397995159042', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.361329664307', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.552380952381', 1, 'X', 0.55238095238095242, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4375', 1, 'Y', 0.4375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}], 'name': 'state_0.505172213515', 'analog': 35},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.632496148437', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.10625', 1, 'Y', 0.10625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.00625', 1, 'YVel', 0.0062499999999999917, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.738802402424', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.533333333333', 1, 'X', 0.53333333333333333, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.475', 1, 'Y', 0.47499999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.784867701877', 'analog': 36},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.804113492347', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062499999999999917, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.984545732596', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.504761904762', 1, 'X', 0.50476190476190474, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.53125', 1, 'Y', 0.53125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}], 'name': 'state_0.362998357521', 'analog': 37},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.606671123744', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.486055333058', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.471428571429', 1, 'X', 0.47142857142857142, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.6', 1, 'Y', 0.59999999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.06875', 1, 'YVel', 0.068749999999999978, 'value']]}], 'name': 'state_0.16250011178', 'analog': 38},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.905756069633', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.189772773103', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.442857142857', 1, 'X', 0.44285714285714284, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.65625', 1, 'Y', 0.65625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}], 'name': 'state_0.615713856582', 'analog': 39},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.721237466457', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.16875', 1, 'Y', 0.16875000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.06875', 1, 'YVel', 0.068750000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0213583275532', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.414285714286', 1, 'X', 0.41428571428571431, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.7125', 1, 'Y', 0.71250000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428525, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}], 'name': 'state_0.692960176737', 'analog': 40},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.737515008948', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.06875', 1, 'YVel', -0.068750000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.672032157379', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.380952380952', 1, 'X', 0.38095238095238093, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.775', 1, 'Y', 0.77500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333381, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0625', 1, 'YVel', 0.0625, 'value']]}], 'name': 'state_0.8108132534', 'analog': 41},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.913791648627', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0193499238152', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.347619047619', 1, 'X', 0.34761904761904761, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.84375', 1, 'Y', 0.84375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.06875', 1, 'YVel', 0.068749999999999978, 'value']]}], 'name': 'state_0.100816271515', 'analog': 42},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.812988856656', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.346581375008', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.342857142857', 1, 'X', 0.34285714285714286, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.8875', 1, 'Y', 0.88749999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0047619047619', 1, 'XVel', -0.004761904761904745, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999956, 'value']]}], 'name': 'state_0.954217578532', 'analog': 43},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.826454543756', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.845222168937', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.366666666667', 1, 'X', 0.36666666666666664, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.934375', 1, 'Y', 0.93437499999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.02380952380952378, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.046875', 1, 'YVel', 0.046875, 'value']]}], 'name': 'state_0.579817956582', 'analog': 44},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.877898552606', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.529957502421', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.395238095238', 1, 'X', 0.39523809523809522, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.91875', 1, 'Y', 0.91874999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.015625', 1, 'YVel', -0.015625, 'value']]}], 'name': 'state_0.0348425828186', 'analog': 45},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.817971084179', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.449397278446', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.419047619048', 1, 'X', 0.41904761904761906, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.86875', 1, 'Y', 0.86875000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.049999999999999933, 'value']]}], 'name': 'state_0.295945618667', 'analog': 46},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.284543029581', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.923867016168', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.447619047619', 1, 'X', 0.44761904761904764, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.8125', 1, 'Y', 0.8125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.39894522894', 'analog': 47},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.952118276063', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1375', 1, 'Y', 0.13750000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.561114926911', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.466666666667', 1, 'X', 0.46666666666666667, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.775', 1, 'Y', 0.77500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619035, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.767983082208', 'analog': 48},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.290323040219', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.946119337754', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.490476190476', 1, 'X', 0.49047619047619045, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.73125', 1, 'Y', 0.73124999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.02380952380952378, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.04375', 1, 'YVel', -0.043750000000000067, 'value']]}], 'name': 'state_0.493212973834', 'analog': 49},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.759131260391', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.792676377629', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.519047619048', 1, 'X', 0.51904761904761909, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.675', 1, 'Y', 0.67500000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428636, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056249999999999911, 'value']]}], 'name': 'state_0.0139589361687', 'analog': 50},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0537149525687', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.779638492567', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.547619047619', 1, 'X', 0.54761904761904767, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.61875', 1, 'Y', 0.61875000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.0119253863359', 'analog': 51},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.3239212741', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.256857010813', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.571428571429', 1, 'X', 0.5714285714285714, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.56875', 1, 'Y', 0.56874999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.050000000000000044, 'value']]}], 'name': 'state_0.437053040484', 'analog': 52},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0646167346215', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.954790340331', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.6', 1, 'X', 0.59999999999999998, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5125', 1, 'Y', 0.51249999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.398772192179', 'analog': 53},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0805903486829', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.528607760977', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.633333333333', 1, 'X', 0.6333333333333333, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.45', 1, 'Y', 0.45000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0625', 1, 'YVel', -0.062499999999999944, 'value']]}], 'name': 'state_0.467483336605', 'analog': 54},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.686241613593', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.768279937749', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.671428571429', 1, 'X', 0.67142857142857137, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.375', 1, 'Y', 0.375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.075', 1, 'YVel', -0.075000000000000011, 'value']]}], 'name': 'state_0.856054583025', 'analog': 55},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.704897204143', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.113936754944', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.695238095238', 1, 'X', 0.69523809523809521, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.325', 1, 'Y', 0.32500000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.049999999999999989, 'value']]}], 'name': 'state_0.983773206117', 'analog': 56},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.15869858973', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.10625', 1, 'Y', 0.10625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.00625', 1, 'YVel', 0.0062499999999999917, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.460413599888', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.72380952381', 1, 'X', 0.72380952380952379, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.26875', 1, 'Y', 0.26874999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.811350948953', 'analog': 57},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.660071619196', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.10625', 1, 'Y', 0.10625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.839410880052', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.757142857143', 1, 'X', 0.75714285714285712, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.20625', 1, 'Y', 0.20624999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0625', 1, 'YVel', -0.0625, 'value']]}], 'name': 'state_0.669521820011', 'analog': 58},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.681016176084', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.788095238095', 1, 'X', 0.78809523809523807, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.103125', 1, 'Y', 0.10312499999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.00238095238095', 1, 'XVel', -0.0023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.003125', 1, 'YVel', -0.0031250000000000028, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.14897496169', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.192609099077', 'analog': 59},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.788231369123', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.00238095238095', 1, 'XVel', 0.0023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.003125', 1, 'YVel', -0.0031249999999999889, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.888492279614', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.761904761905', 1, 'X', 0.76190476190476186, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.19375', 1, 'Y', 0.19375000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0047619047619', 1, 'XVel', 0.004761904761904745, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0125', 1, 'YVel', -0.012499999999999983, 'value']]}], 'name': 'state_0.0626652148594', 'analog': 60},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.76365710391', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1375', 1, 'Y', 0.13750000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.683642979093', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.742857142857', 1, 'X', 0.74285714285714288, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.23125', 1, 'Y', 0.23125000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.01904761904761898, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000006, 'value']]}], 'name': 'state_0.673631948714', 'analog': 61},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.359411421967', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.288409899193', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.714285714286', 1, 'X', 0.7142857142857143, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056249999999999967, 'value']]}], 'name': 'state_0.41351555333', 'analog': 62},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.512485635107', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1375', 1, 'Y', 0.13750000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.487260404408', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.690476190476', 1, 'X', 0.69047619047619047, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3375', 1, 'Y', 0.33750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0238095238095', 1, 'XVel', -0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.050000000000000044, 'value']]}], 'name': 'state_0.97886621726', 'analog': 63},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.354476859837', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.960258570896', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.657142857143', 1, 'X', 0.65714285714285714, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4', 1, 'Y', 0.40000000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0625', 1, 'YVel', 0.0625, 'value']]}], 'name': 'state_0.0265754048498', 'analog': 64},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0388334417733', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0953229315684', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.628571428571', 1, 'X', 0.62857142857142856, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.45625', 1, 'Y', 0.45624999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056249999999999967, 'value']]}], 'name': 'state_0.247535389877', 'analog': 65},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.875332491872', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.16875', 1, 'Y', 0.16875000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.06875', 1, 'YVel', 0.068750000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.643476592671', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.604761904762', 1, 'X', 0.60476190476190472, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.50625', 1, 'Y', 0.50624999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0238095238095', 1, 'XVel', -0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.049999999999999989, 'value']]}], 'name': 'state_0.128965915191', 'analog': 66},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.010247806794', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0125', 1, 'YVel', 0.012499999999999983, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.193758390556', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.57619047619', 1, 'X', 0.57619047619047614, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5625', 1, 'Y', 0.5625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}], 'name': 'state_0.237242487153', 'analog': 67},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.962924840148', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.00840000464232', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.547619047619', 1, 'X', 0.54761904761904767, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.61875', 1, 'Y', 0.61875000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.02857142857142847, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056250000000000022, 'value']]}], 'name': 'state_0.43561225895', 'analog': 68},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0624579093116', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.842979819701', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.528571428571', 1, 'X', 0.52857142857142858, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.65625', 1, 'Y', 0.65625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.205332705272', 'analog': 69},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.733760572633', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.137139880546', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.495238095238', 1, 'X', 0.49523809523809526, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.71875', 1, 'Y', 0.71875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0625', 1, 'YVel', 0.0625, 'value']]}], 'name': 'state_0.509810042464', 'analog': 70},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.680674793165', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.594470477034', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.471428571429', 1, 'X', 0.47142857142857142, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.76875', 1, 'Y', 0.76875000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0238095238095', 1, 'XVel', -0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.050000000000000044, 'value']]}], 'name': 'state_0.362069938732', 'analog': 71},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.692430307999', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.275', 1, 'Y', 0.27500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.09375', 1, 'YVel', 0.093750000000000028, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.646855309277', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.442857142857', 1, 'X', 0.44285714285714284, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.825', 1, 'Y', 0.82499999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056249999999999911, 'value']]}], 'name': 'state_0.960600976706', 'analog': 72},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.450366371735', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.28125', 1, 'Y', 0.28125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.00625', 1, 'YVel', 0.0062499999999999778, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.847158240153', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.419047619048', 1, 'X', 0.41904761904761906, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.86875', 1, 'Y', 0.86875000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0238095238095', 1, 'XVel', -0.02380952380952378, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043750000000000067, 'value']]}], 'name': 'state_0.412489728668', 'analog': 73},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.585448608601', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.28125', 1, 'Y', 0.28125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.157143337496', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.385714285714', 1, 'X', 0.38571428571428573, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.934375', 1, 'Y', 0.93437499999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.065625', 1, 'YVel', 0.065624999999999933, 'value']]}], 'name': 'state_0.65898299143', 'analog': 74},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.529615506662', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.28125', 1, 'Y', 0.28125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0563903199434', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.357142857143', 1, 'X', 0.35714285714285715, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.91875', 1, 'Y', 0.91874999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.015625', 1, 'YVel', -0.015625, 'value']]}], 'name': 'state_0.0817423865584', 'analog': 75},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.753248263844', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.01875', 1, 'YVel', -0.018749999999999989, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.572445163785', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.338095238095', 1, 'X', 0.33809523809523812, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.88125', 1, 'Y', 0.88124999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0190476190476', 1, 'XVel', -0.019047619047619035, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.157688560299', 'analog': 76},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.879717647757', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.775811149392', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.338095238095', 1, 'X', 0.33809523809523812, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.825', 1, 'Y', 0.82499999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.813939842996', 'analog': 77},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.633399481022', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.549978910938', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.361904761905', 1, 'X', 0.3619047619047619, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.775', 1, 'Y', 0.77500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.02380952380952378, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.049999999999999933, 'value']]}], 'name': 'state_0.577260049692', 'analog': 78},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.762229571114', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.31875', 1, 'Y', 0.31874999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05625', 1, 'YVel', 0.056249999999999967, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.861705860538', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.390476190476', 1, 'X', 0.39047619047619048, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.71875', 1, 'Y', 0.71875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.0936719495939', 'analog': 79},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.178669501075', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.45625', 1, 'Y', 0.45624999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1375', 1, 'YVel', 0.13750000000000001, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.224491294964', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.419047619048', 1, 'X', 0.41904761904761906, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.6625', 1, 'Y', 0.66249999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.00393922705088', 'analog': 80},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.054255848302', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.40625', 1, 'Y', 0.40625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.049999999999999989, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0267647853842', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.442857142857', 1, 'X', 0.44285714285714284, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.61875', 1, 'Y', 0.61875000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.02380952380952378, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.04375', 1, 'YVel', -0.043749999999999956, 'value']]}], 'name': 'state_0.184193648621', 'analog': 81},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.550468731558', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4', 1, 'Y', 0.40000000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062499999999999778, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.759411010307', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.471428571429', 1, 'X', 0.47142857142857142, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5625', 1, 'Y', 0.5625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.0389987684047', 'analog': 82},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.515186660774', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4', 1, 'Y', 0.40000000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.169593089389', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.495238095238', 1, 'X', 0.49523809523809526, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5125', 1, 'Y', 0.51249999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.050000000000000044, 'value']]}], 'name': 'state_0.788574182089', 'analog': 83},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.556276313556', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4', 1, 'Y', 0.40000000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.261919396027', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.52380952381', 1, 'X', 0.52380952380952384, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.45625', 1, 'Y', 0.45624999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056249999999999967, 'value']]}], 'name': 'state_0.349193574488', 'analog': 84},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.971747473445', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3625', 1, 'Y', 0.36249999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000033, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.331892259557', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.561904761905', 1, 'X', 0.56190476190476191, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.38125', 1, 'Y', 0.38124999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.075', 1, 'YVel', -0.075000000000000011, 'value']]}], 'name': 'state_0.805562366973', 'analog': 85},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.491117058481', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3625', 1, 'Y', 0.36249999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.280802793121', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.585714285714', 1, 'X', 0.58571428571428574, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3375', 1, 'Y', 0.33750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.04375', 1, 'YVel', -0.043749999999999956, 'value']]}], 'name': 'state_0.548298425925', 'analog': 86},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.743208371766', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3625', 1, 'Y', 0.36249999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.143607095815', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.604761904762', 1, 'X', 0.60476190476190472, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3', 1, 'Y', 0.29999999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.01904761904761898, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000033, 'value']]}], 'name': 'state_0.472013422447', 'analog': 87},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.500178793134', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.23125', 1, 'Y', 0.23125000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.13125', 1, 'YVel', -0.13124999999999998, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.540035769674', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.638095238095', 1, 'X', 0.63809523809523805, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.23125', 1, 'Y', 0.23125000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.06875', 1, 'YVel', -0.068749999999999978, 'value']]}], 'name': 'state_0.80898545332', 'analog': 88},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.800916789877', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.25625', 1, 'Y', 0.25624999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.025', 1, 'YVel', 0.024999999999999967, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.902336651824', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.666666666667', 1, 'X', 0.66666666666666663, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.175', 1, 'Y', 0.17499999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}], 'name': 'state_0.417662815535', 'analog': 89},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.619374003951', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2', 1, 'Y', 0.20000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056249999999999967, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.505711841964', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.685714285714', 1, 'X', 0.68571428571428572, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1375', 1, 'Y', 0.13750000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.387426436103', 'analog': 90},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.356586621664', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1', 1, 'YVel', -0.10000000000000001, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.175120063636', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.72380952381', 1, 'X', 0.72380952380952379, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0625', 1, 'Y', 0.0625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.075', 1, 'YVel', -0.075000000000000011, 'value']]}], 'name': 'state_0.328326861292', 'analog': 91},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.523242176021', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.14375', 1, 'Y', 0.14374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999983, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.371229182181', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.752380952381', 1, 'X', 0.75238095238095237, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.08125', 1, 'Y', 0.081250000000000003, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.01875', 1, 'YVel', 0.018750000000000003, 'value']]}], 'name': 'state_0.643556114525', 'analog': 92},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0176707835681', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.28125', 1, 'Y', 0.28125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1375', 1, 'YVel', 0.13750000000000001, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.210194862002', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.15625', 1, 'Y', 0.15625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.075', 1, 'YVel', 0.074999999999999997, 'value']]}], 'name': 'state_0.758528546855', 'analog': 93},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.580355707312', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1875', 1, 'Y', 0.1875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.09375', 1, 'YVel', -0.09375, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0471022267876', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.778457941077', 'analog': 94},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.397899229791', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1875', 1, 'Y', 0.1875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0527680699203', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.703526704411', 'analog': 95},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.214998478607', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1', 1, 'YVel', 0.099999999999999978, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.386914478379', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.720081566461', 'analog': 96},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.870815932243', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.526952320442', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.994457894167', 'analog': 97},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.405123158541', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.131196919272', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.471428571429', 1, 'X', 0.47142857142857142, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.50625', 1, 'Y', 0.50624999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.319047619048', 1, 'XVel', -0.31904761904761902, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.35', 1, 'YVel', 0.34999999999999998, 'value']]}], 'name': 'state_0.276582308152', 'analog': 98},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.829404653913', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.282545093465', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.495238095238', 1, 'X', 0.49523809523809526, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5375', 1, 'Y', 0.53749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.257708201946', 'analog': 99},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.754816340439', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.588325289708', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.52380952381', 1, 'X', 0.52380952380952384, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.575', 1, 'Y', 0.57499999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.0204388689859', 'analog': 100},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.440779284591', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35', 1, 'Y', 0.34999999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0625', 1, 'YVel', 0.0625, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.906037521045', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.552380952381', 1, 'X', 0.55238095238095242, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.6125', 1, 'Y', 0.61250000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000089, 'value']]}], 'name': 'state_0.0463552848108', 'analog': 101},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.679921882992', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35625', 1, 'Y', 0.35625000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.00625', 1, 'YVel', 0.0062500000000000333, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.98309191313', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.580952380952', 1, 'X', 0.580952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.65', 1, 'Y', 0.65000000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.504762557984', 'analog': 102},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.175080561684', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35625', 1, 'Y', 0.35625000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.378562879721', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.604761904762', 1, 'X', 0.60476190476190472, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.68125', 1, 'Y', 0.68125000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.998427068703', 'analog': 103},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.982725012655', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35625', 1, 'Y', 0.35625000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.395467757885', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.633333333333', 1, 'X', 0.6333333333333333, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.71875', 1, 'Y', 0.71875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.503282757323', 'analog': 104},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.886342972614', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.49375', 1, 'Y', 0.49375000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1375', 1, 'YVel', 0.13750000000000001, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.172033296257', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.661904761905', 1, 'X', 0.66190476190476188, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.75625', 1, 'Y', 0.75624999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.217514816877', 'analog': 105},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.959701354233', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.50625', 1, 'Y', 0.50624999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0125', 1, 'YVel', 0.012499999999999956, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.162897833367', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.685714285714', 1, 'X', 0.68571428571428572, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.7875', 1, 'Y', 0.78749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.652440833011', 'analog': 106},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.361846783261', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.43125', 1, 'Y', 0.43125000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.075', 1, 'YVel', -0.074999999999999956, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.970254879799', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.704761904762', 1, 'X', 0.70476190476190481, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.8125', 1, 'Y', 0.8125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.025', 1, 'YVel', 0.025000000000000022, 'value']]}], 'name': 'state_0.41093827345', 'analog': 107},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.624242741604', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.33125', 1, 'Y', 0.33124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1', 1, 'YVel', -0.10000000000000003, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.870711999595', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.728571428571', 1, 'X', 0.72857142857142854, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.84375', 1, 'Y', 0.84375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.743418682364', 'analog': 108},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0553371351812', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.325', 1, 'Y', 0.32500000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062499999999999778, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.396124252253', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.761904761905', 1, 'X', 0.76190476190476186, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.8875', 1, 'Y', 0.88749999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999956, 'value']]}], 'name': 'state_0.344645742977', 'analog': 109},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.84072578135', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.26875', 1, 'Y', 0.26874999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.623215103264', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.785714285714', 1, 'X', 0.7857142857142857, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.91875', 1, 'Y', 0.91874999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.932150863058', 'analog': 110},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.00779342499033', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062499999999999778, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.95308010983', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.844152322427', 'analog': 111},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.499801613793', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0103914995277', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.573628157316', 'analog': 112},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.304012567325', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.884504002358', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.771169700587', 'analog': 113},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.384570514251', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.309084576197', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.401834776798', 'analog': 114},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.384397846465', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.297517658916', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.0343231473812', 'analog': 115},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.544209900268', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1125', 1, 'Y', 0.1125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.15', 1, 'YVel', -0.15000000000000002, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.371068323877', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.471428571429', 1, 'X', 0.47142857142857142, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.89375', 1, 'Y', 0.89375000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.314285714286', 1, 'XVel', -0.31428571428571428, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.024999999999999911, 'value']]}], 'name': 'state_0.422711526547', 'analog': 116},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.7856335087', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1125', 1, 'Y', 0.1125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.791391251586', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.495238095238', 1, 'X', 0.49523809523809526, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.8625', 1, 'Y', 0.86250000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.03125', 1, 'YVel', -0.03125, 'value']]}], 'name': 'state_0.351890962826', 'analog': 117},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.304199358041', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0125', 1, 'YVel', -0.012499999999999997, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.439028468244', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.514285714286', 1, 'X', 0.51428571428571423, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.8375', 1, 'Y', 0.83750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.01904761904761898, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.025000000000000022, 'value']]}], 'name': 'state_0.34583119359', 'analog': 118},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.300827632749', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.16875', 1, 'Y', 0.16875000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.06875', 1, 'YVel', 0.068750000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.528014765787', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.538095238095', 1, 'X', 0.53809523809523807, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.80625', 1, 'Y', 0.80625000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.03125', 1, 'YVel', -0.03125, 'value']]}], 'name': 'state_0.0808349469935', 'analog': 119},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.788054070664', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.06875', 1, 'YVel', -0.068750000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.375774060443', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.561904761905', 1, 'X', 0.56190476190476191, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.775', 1, 'Y', 0.77500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523836, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.03125', 1, 'YVel', -0.03125, 'value']]}], 'name': 'state_0.38208945241', 'analog': 120},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0265342713918', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.55884617452', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.580952380952', 1, 'X', 0.580952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.75', 1, 'Y', 0.75, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.025000000000000022, 'value']]}], 'name': 'state_0.398614852402', 'analog': 121},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.822949332779', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.845108264899', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.6', 1, 'X', 0.59999999999999998, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.725', 1, 'Y', 0.72499999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.01904761904761898, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.025000000000000022, 'value']]}], 'name': 'state_0.316922161988', 'analog': 122},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.766365926414', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.993132425886', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.628571428571', 1, 'X', 0.62857142857142856, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.6875', 1, 'Y', 0.6875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.237521664845', 'analog': 123},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.841959620113', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.735399522449', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.657142857143', 1, 'X', 0.65714285714285714, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.65', 1, 'Y', 0.65000000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.414843474285', 'analog': 124},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.274078167422', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0615970645758', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.685714285714', 1, 'X', 0.68571428571428572, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.6125', 1, 'Y', 0.61250000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.0192451460662', 'analog': 125},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.44863518911', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.905087193236', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.704761904762', 1, 'X', 0.70476190476190481, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5875', 1, 'Y', 0.58750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.025000000000000022, 'value']]}], 'name': 'state_0.0332072357398', 'analog': 126},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.756461046194', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.592189078037', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.72380952381', 1, 'X', 0.72380952380952379, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5625', 1, 'Y', 0.5625, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.01904761904761898, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.025', 1, 'YVel', -0.025000000000000022, 'value']]}], 'name': 'state_0.286004065537', 'analog': 127},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.483317964627', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.104834807478', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.757142857143', 1, 'X', 0.75714285714285712, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.51875', 1, 'Y', 0.51875000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.04375', 1, 'YVel', -0.043749999999999956, 'value']]}], 'name': 'state_0.151444238815', 'analog': 128},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.73044715749', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.204946107229', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.795238095238', 1, 'X', 0.79523809523809519, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.46875', 1, 'Y', 0.46875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.050000000000000044, 'value']]}], 'name': 'state_0.47920059722', 'analog': 129},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.49182533946', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.148014284112', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.59040349011', 'analog': 130},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.980667097156', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.255525137116', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.391105243914', 'analog': 131},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.905193529305', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.1', 1, 'Y', 0.10000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.702222061678', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.448226387607', 'analog': 132},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.360414328895', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.16875', 1, 'Y', 0.16875000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.06875', 1, 'YVel', 0.068750000000000006, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.529527843106', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.0378661213879', 'analog': 133},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.556879060129', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0125', 1, 'YVel', 0.012499999999999983, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.114681790847', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.485714285714', 1, 'X', 0.48571428571428571, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.125', 1, 'Y', 0.125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.309523809524', 1, 'XVel', -0.30952380952380948, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.34375', 1, 'YVel', -0.34375, 'value']]}], 'name': 'state_0.960370764506', 'analog': 134},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.749822947083', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.18125', 1, 'Y', 0.18124999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.753589662947', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.519047619048', 1, 'X', 0.51904761904761909, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.16875', 1, 'Y', 0.16875000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333381, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043750000000000011, 'value']]}], 'name': 'state_0.294006836949', 'analog': 135},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.570467683195', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3125', 1, 'Y', 0.3125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.13125', 1, 'YVel', 0.13125000000000001, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.628765225239', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.552380952381', 1, 'X', 0.55238095238095242, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2125', 1, 'Y', 0.21249999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999983, 'value']]}], 'name': 'state_0.0452042713447', 'analog': 136},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.180966603057', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.31875', 1, 'Y', 0.31874999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.00625', 1, 'YVel', 0.0062499999999999778, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.592888785707', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.580952380952', 1, 'X', 0.580952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.25', 1, 'Y', 0.25, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000006, 'value']]}], 'name': 'state_0.455666535977', 'analog': 137},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.505981970646', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.41875', 1, 'Y', 0.41875000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1', 1, 'YVel', 0.10000000000000003, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.567393080431', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.604761904762', 1, 'X', 0.60476190476190472, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.28125', 1, 'Y', 0.28125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0238095238095', 1, 'XVel', 0.023809523809523725, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.575980184034', 'analog': 138},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.0138414765004', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.43125', 1, 'Y', 0.43125000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0125', 1, 'YVel', 0.012500000000000011, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.322337534348', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.62380952381', 1, 'X', 0.62380952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.30625', 1, 'Y', 0.30625000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.025', 1, 'YVel', 0.025000000000000022, 'value']]}], 'name': 'state_0.241551731829', 'analog': 139},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.48355170492', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.43125', 1, 'Y', 0.43125000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.0780494646063', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.652380952381', 1, 'X', 0.65238095238095239, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.34375', 1, 'Y', 0.34375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.875522224706', 'analog': 140},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.526938584379', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2375', 1, 'Y', 0.23749999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.19375', 1, 'YVel', -0.19375000000000003, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.719330180012', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.690476190476', 1, 'X', 0.69047619047619047, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.39375', 1, 'Y', 0.39374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.049999999999999989, 'value']]}], 'name': 'state_0.536747011919', 'analog': 141},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.766852119017', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35625', 1, 'Y', 0.35625000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.11875', 1, 'YVel', 0.11875000000000002, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.494330386822', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.728571428571', 1, 'X', 0.72857142857142854, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.44375', 1, 'Y', 0.44374999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.049999999999999989, 'value']]}], 'name': 'state_0.24163681566', 'analog': 142},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.154466207516', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.49375', 1, 'Y', 0.49375000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1375', 1, 'YVel', 0.13750000000000001, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.336598300896', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.757142857143', 1, 'X', 0.75714285714285712, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.48125', 1, 'Y', 0.48125000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0285714285714', 1, 'XVel', 0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000033, 'value']]}], 'name': 'state_0.217082400094', 'analog': 143},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.20296849745', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.50625', 1, 'Y', 0.50624999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0125', 1, 'YVel', 0.012499999999999956, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.631479742663', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.766666666667', 1, 'X', 0.76666666666666672, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5125', 1, 'Y', 0.51249999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.00952380952381', 1, 'XVel', 0.009523809523809601, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.031249999999999944, 'value']]}], 'name': 'state_0.238528007383', 'analog': 144},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.298437381298', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.50625', 1, 'Y', 0.50624999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.642742536189', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.719047619048', 1, 'X', 0.71904761904761905, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.55625', 1, 'Y', 0.55625000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.047619047619', 1, 'XVel', -0.047619047619047672, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043750000000000067, 'value']]}], 'name': 'state_0.112913235069', 'analog': 145},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.191036331654', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.44375', 1, 'Y', 0.44374999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0625', 1, 'YVel', -0.0625, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.846937168045', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.680952380952', 1, 'X', 0.68095238095238098, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5875', 1, 'Y', 0.58750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0380952380952', 1, 'XVel', -0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.44289074693', 'analog': 146},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.476288013164', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4375', 1, 'Y', 0.4375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.00625', 1, 'YVel', -0.0062499999999999778, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.846436681495', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.633333333333', 1, 'X', 0.6333333333333333, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.63125', 1, 'Y', 0.63124999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.047619047619', 1, 'XVel', -0.047619047619047672, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999956, 'value']]}], 'name': 'state_0.975361229488', 'analog': 147},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.936905486268', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.4375', 1, 'Y', 0.4375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.11918399473', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.590476190476', 1, 'X', 0.59047619047619049, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.66875', 1, 'Y', 0.66874999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0428571428571', 1, 'XVel', -0.042857142857142816, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.425702534224', 'analog': 148},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.223008408295', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2875', 1, 'Y', 0.28749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.15', 1, 'YVel', -0.15000000000000002, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.964029981393', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.547619047619', 1, 'X', 0.54761904761904767, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.70625', 1, 'Y', 0.70625000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0428571428571', 1, 'XVel', -0.042857142857142816, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037500000000000089, 'value']]}], 'name': 'state_0.541248161727', 'analog': 149},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.385122784459', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.225', 1, 'Y', 0.22500000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0625', 1, 'YVel', -0.062499999999999972, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.00860169275721', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.504761904762', 1, 'X', 0.50476190476190474, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.74375', 1, 'Y', 0.74375000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0428571428571', 1, 'XVel', -0.042857142857142927, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0375', 1, 'YVel', 0.037499999999999978, 'value']]}], 'name': 'state_0.189685148885', 'analog': 150},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.100953995679', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.3125', 1, 'Y', 0.3125, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0875', 1, 'YVel', 0.087499999999999994, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.337649765177', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.452380952381', 1, 'X', 0.45238095238095238, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.7875', 1, 'Y', 0.78749999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.052380952381', 1, 'XVel', -0.052380952380952361, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999956, 'value']]}], 'name': 'state_0.434922667259', 'analog': 151},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.975495851644', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5', 1, 'Y', 0.5, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1875', 1, 'YVel', 0.1875, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.845576472998', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.395238095238', 1, 'X', 0.39523809523809522, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.8375', 1, 'Y', 0.83750000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0571428571429', 1, 'XVel', -0.057142857142857162, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.05', 1, 'YVel', 0.050000000000000044, 'value']]}], 'name': 'state_0.605908127046', 'analog': 152},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.96175939123', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.45', 1, 'Y', 0.45000000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.049999999999999989, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.206498860406', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.361904761905', 1, 'X', 0.3619047619047619, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.86875', 1, 'Y', 0.86875000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0333333333333', 1, 'XVel', -0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.03125', 1, 'YVel', 0.03125, 'value']]}], 'name': 'state_0.327420749288', 'analog': 153},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.927697034435', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5125', 1, 'Y', 0.51249999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0625', 1, 'YVel', 0.062499999999999944, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.771887539864', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.333333333333', 1, 'X', 0.33333333333333331, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.89375', 1, 'Y', 0.89375000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_-0.0285714285714', 1, 'XVel', -0.028571428571428581, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.025', 1, 'YVel', 0.025000000000000022, 'value']]}], 'name': 'state_0.872373686593', 'analog': 154},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.296364428784', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.475', 1, 'Y', 0.47499999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.739193870454', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.352380952381', 1, 'X', 0.35238095238095241, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.934375', 1, 'Y', 0.93437499999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0190476190476', 1, 'XVel', 0.019047619047619091, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.040625', 1, 'YVel', 0.040624999999999911, 'value']]}], 'name': 'state_0.432034209712', 'analog': 155},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.125487390547', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.375', 1, 'Y', 0.375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1', 1, 'YVel', -0.099999999999999978, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.730489807108', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.390476190476', 1, 'X', 0.39047619047619048, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.93125', 1, 'Y', 0.93125000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.003125', 1, 'YVel', -0.0031249999999999334, 'value']]}], 'name': 'state_0.0752575940004', 'analog': 156},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.179833560432', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.2625', 1, 'Y', 0.26250000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1125', 1, 'YVel', -0.11249999999999999, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.948821167289', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.447619047619', 1, 'X', 0.44761904761904764, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.88125', 1, 'Y', 0.88124999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0571428571429', 1, 'XVel', 0.057142857142857162, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.050000000000000044, 'value']]}], 'name': 'state_0.114218804289', 'analog': 157},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.348064147283', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35', 1, 'Y', 0.34999999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0875', 1, 'YVel', 0.087499999999999967, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.85727472095', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.490476190476', 1, 'X', 0.49047619047619045, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.84375', 1, 'Y', 0.84375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0428571428571', 1, 'XVel', 0.042857142857142816, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.0737087684927', 'analog': 158},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.993168917616', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.35625', 1, 'Y', 0.35625000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.00625', 1, 'YVel', 0.0062500000000000333, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.415764866586', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.533333333333', 1, 'X', 0.53333333333333333, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.80625', 1, 'Y', 0.80625000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0428571428571', 1, 'XVel', 0.042857142857142871, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.0609751744856', 'analog': 159},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.556358407856', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.46875', 1, 'Y', 0.46875, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1125', 1, 'YVel', 0.11249999999999999, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.398180267935', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.580952380952', 1, 'X', 0.580952380952381, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.7625', 1, 'Y', 0.76249999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.047619047619', 1, 'XVel', 0.047619047619047672, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.04375', 1, 'YVel', -0.043750000000000067, 'value']]}], 'name': 'state_0.326417215647', 'analog': 160},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.160703232966', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.60625', 1, 'Y', 0.60624999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1375', 1, 'YVel', 0.13749999999999996, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.79458851704', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.638095238095', 1, 'X', 0.63809523809523805, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.7125', 1, 'Y', 0.71250000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0571428571429', 1, 'XVel', 0.057142857142857051, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05', 1, 'YVel', -0.049999999999999933, 'value']]}], 'name': 'state_0.446335844076', 'analog': 161},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.697623040188', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.525', 1, 'Y', 0.52500000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.08125', 1, 'YVel', -0.081249999999999933, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.730892233126', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.680952380952', 1, 'X', 0.68095238095238098, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.675', 1, 'Y', 0.67500000000000004, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0428571428571', 1, 'XVel', 0.042857142857142927, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037499999999999978, 'value']]}], 'name': 'state_0.716861546862', 'analog': 162},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.48446207164', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.56875', 1, 'Y', 0.56874999999999998, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.04375', 1, 'YVel', 0.043749999999999956, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.802554071866', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.72380952381', 1, 'X', 0.72380952380952379, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.6375', 1, 'Y', 0.63749999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0428571428571', 1, 'XVel', 0.042857142857142816, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.0375', 1, 'YVel', -0.037500000000000089, 'value']]}], 'name': 'state_0.293731079845', 'analog': 163},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.460217069352', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.5125', 1, 'Y', 0.51249999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.05625', 1, 'YVel', -0.056250000000000022, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.576643233446', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.761904761905', 1, 'X', 0.76190476190476186, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.60625', 1, 'Y', 0.60624999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0380952380952', 1, 'XVel', 0.038095238095238071, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.03125', 1, 'YVel', -0.03125, 'value']]}], 'name': 'state_0.898883940234', 'analog': 164},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.306514916147', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.39375', 1, 'Y', 0.39374999999999999, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.11875', 1, 'YVel', -0.11874999999999997, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.891366003233', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.795238095238', 1, 'X', 0.79523809523809519, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.575', 1, 'Y', 0.57499999999999996, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0333333333333', 1, 'XVel', 0.033333333333333326, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.03125', 1, 'YVel', -0.03125, 'value']]}], 'name': 'state_0.4979098627', 'analog': 165},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.00947859224353', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.49375', 1, 'Y', 0.49375000000000002, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1', 1, 'YVel', 0.10000000000000003, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.134895468261', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.419010684589', 'analog': 166},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.396865903698', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.59375', 1, 'Y', 0.59375, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.1', 1, 'YVel', 0.099999999999999978, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.749550304059', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.545606450378', 'analog': 167},
{'set': 'memory', 'RBs': [{'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'bar_0.469213682387', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.790476190476', 1, 'X', 0.79047619047619044, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.48125', 1, 'Y', 0.48125000000000001, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_-0.1125', 1, 'YVel', -0.11249999999999999, 'value']]}, {'higher_order': False, 'pred_name': 'non_exist', 'P': 'non_exist', 'object_name': 'ball_0.912963560057', 'pred_sem': [], 'object_sem': [['X', 1, 'X', 'nil', 'state'], ['X_0.0', 1, 'X', 0.0, 'value'], ['Y', 1, 'Y', 'nil', 'state'], ['Y_0.0', 1, 'Y', 0.0, 'value'], ['XVel', 1, 'XVel', 'nil', 'state'], ['XVel_0.0', 1, 'XVel', 0.0, 'value'], ['YVel', 1, 'YVel', 'nil', 'state'], ['YVel_0.0', 1, 'YVel', 0.0, 'value']]}], 'name': 'state_0.578067692552', 'analog': 168},
]
| 1,028.226744
| 1,108
| 0.572853
| 25,523
| 176,855
| 3.803863
| 0.045136
| 0.111406
| 0.023361
| 0.069629
| 0.890633
| 0.890633
| 0.890633
| 0.890633
| 0.888686
| 0.888686
| 0
| 0.237177
| 0.104181
| 176,855
| 172
| 1,109
| 1,028.226744
| 0.375623
| 0
| 0
| 0
| 0
| 0
| 0.453403
| 0.008204
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
579345ab72eb9317ea9e46004c2c8a421e8de0b2
| 21,087
|
py
|
Python
|
tests/cli/test_spec.py
|
maximzubkov/codeprep
|
807ee1ea33796b6853c45e9dcb4e866b3f09a5f2
|
[
"Apache-2.0"
] | 33
|
2020-03-02T23:42:15.000Z
|
2022-03-18T02:34:32.000Z
|
tests/cli/test_spec.py
|
maximzubkov/codeprep
|
807ee1ea33796b6853c45e9dcb4e866b3f09a5f2
|
[
"Apache-2.0"
] | 10
|
2020-02-27T13:43:00.000Z
|
2021-04-21T12:11:44.000Z
|
tests/cli/test_spec.py
|
maximzubkov/codeprep
|
807ee1ea33796b6853c45e9dcb4e866b3f09a5f2
|
[
"Apache-2.0"
] | 9
|
2020-03-16T14:28:06.000Z
|
2021-09-30T09:40:56.000Z
|
# SPDX-FileCopyrightText: 2020 Hlib Babii <hlibbabii@gmail.com>
#
# SPDX-License-Identifier: Apache-2.0
import os
from unittest import mock
from unittest.mock import Mock
import pytest
from docopt import DocoptExit
from codeprep.bpepkg.bpe_config import BpeConfig, BpeParam
from codeprep.cli.spec import parse_and_run
from codeprep.prepconfig import PrepParam, PrepConfig
# Filesystem-path stand-ins handed to the CLI under test; the mocked API layer
# means these paths are never actually touched on disk.
PATH_TO_OUTPUT_STUB = os.path.join('/', 'path', 'to', 'output')
PATH_TO_DATASET_STUB = os.path.join('/', 'path', 'to', 'dataset')
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_uc100u(api_mock):
    """`nosplit --no-spaces` maps to prep config u/c/1/0/0/u."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '0',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--no-spaces'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_Uc100u(api_mock):
    """--no-unicode flips EN_ONLY to 'U' for the nosplit command."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'U', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '0',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--no-spaces', '--no-unicode'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
def test_xx0xxx_max_str_length():
    """--max-str-length together with --no-str is rejected by the CLI."""
    with pytest.raises(DocoptExit):
        parse_and_run(['nosplit', 'str', '-e', 'java', '--no-spaces',
                       '--no-str', '--no-com', '--max-str-length=2'])
def test_xx0Fxx_max_str_length():
    """--full-strings together with --no-str is rejected by the CLI."""
    with pytest.raises(DocoptExit):
        parse_and_run(['nosplit', 'str', '-e', 'java', '--no-spaces',
                       '--no-str', '--no-com', '--full-strings'])
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxx0sx(api_mock):
    """Without --no-spaces, TABS_NEWLINES defaults to 's'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '0',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxxFsx(api_mock):
    """--full-strings sets SPLIT to 'F'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: 'F',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--full-strings'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xx2xxx_max_str_length0(api_mock):
    """--max-str-length=0 maps the STR param to '2'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '2', PrepParam.SPLIT: 'F',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--full-strings', '--max-str-length=0'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xx2xxx_max_str_length1(api_mock):
    """--max-str-length=1 also maps the STR param to '2'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '2', PrepParam.SPLIT: 'F',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--full-strings', '--max-str-length=1'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xx2xxx(api_mock):
    """--max-str-length=2 keeps the STR param at '2'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '2', PrepParam.SPLIT: 'F',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--full-strings', '--max-str-length=2'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxExxx(api_mock):
    """--max-str-length=14 maps the STR param to 'E'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: 'E', PrepParam.SPLIT: 'F',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--full-strings', '--max-str-length=14'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xx1xxx_max_str_length_large(api_mock):
    """A very large --max-str-length falls back to STR param '1'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: 'F',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', 'str', '-e', 'java', '--full-strings', '--max-str-length=999'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
def test_xxx0x1():
    """nosplit combined with --no-case is an invalid option combination."""
    argv = ['nosplit', 'str', '-e', 'java', '--no-spaces', '--no-case']
    # The former `as context` binding was never used; a bare raises() is enough.
    with pytest.raises(DocoptExit):
        parse_and_run(argv)
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_uc110l(api_mock):
    """basic + --no-case maps to SPLIT '1' and CASE 'l'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--no-case'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxA1xx(api_mock):
    """basic with --no-str and --max-str-length is rejected by the CLI."""
    with pytest.raises(DocoptExit):
        parse_and_run(['basic', 'str', '-e', 'java', '--no-str', '--max-str-length=10'])
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_Uxx1xx(api_mock):
    """basic + --no-unicode flips EN_ONLY to 'U'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'U', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--no-case', '--no-unicode'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xc01xx(api_mock):
    """--no-str maps the STR param to '0'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '0', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--no-case', '--no-str'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_x001xx(api_mock):
    """--no-str plus --no-com zeroes both the STR and COM params."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: '0',
                           PrepParam.STR: '0', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--no-case', '--no-str', '--no-com'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_x011xx(api_mock):
    """--no-com alone zeroes only the COM param."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: '0',
                           PrepParam.STR: '1', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--no-case', '--no-com'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_uc12xx(api_mock):
    """--split-numbers maps the SPLIT param to '2'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '2',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--no-case', '--split-numbers'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_uc13xx(api_mock):
    """--ronin maps SPLIT to '3'; case stays 'u' without --no-case."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '3',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--ronin'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
def test_xx0xxx_with_max_str_length():
    """basic with --no-str and --max-str-length is rejected by the CLI."""
    with pytest.raises(DocoptExit):
        parse_and_run(['basic', 'str', '-e', 'java', '--no-str', '--max-str-length=10'])
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxx3xl(api_mock):
    """--ronin combined with --no-case yields SPLIT '3' and CASE 'l'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '3',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--no-case', '--ronin'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_uc1sxx(api_mock):
    """--stem maps the SPLIT param to 's'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: 's',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces', '--stem'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_uc14xx(api_mock):
    """bpe 5k maps SPLIT to '4' and passes '5k' through to the API."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '4',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['bpe', '5k', 'str', '-e', 'java', '--no-spaces'])
    api_mock.text.preprocess.assert_called_with("str", expected, '5k', extension="java")
def test_xxA4xx():
    """bpe with --no-str and --max-str-length is rejected by the CLI."""
    with pytest.raises(DocoptExit):
        parse_and_run(['bpe', '5k', 'str', '-e', 'java', '--no-str', '--max-str-length=10'])
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_uc15xx(api_mock):
    """bpe 1k maps SPLIT to '5' and passes '1k' through to the API."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '5',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['bpe', '1k', 'str', '-e', 'java', '--no-spaces'])
    api_mock.text.preprocess.assert_called_with("str", expected, '1k', extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxx6xx(api_mock):
    """bpe 10k maps SPLIT to '6' and passes '10k' through to the API."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '6',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['bpe', '10k', 'str', '-e', 'java', '--no-spaces'])
    api_mock.text.preprocess.assert_called_with("str", expected, '10k', extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxx9xx(api_mock):
    """A custom bpe codes id maps SPLIT to '9' and is forwarded verbatim."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '9',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['bpe', 'custom-id-5000', 'str', '-e', 'java', '--no-spaces'])
    api_mock.text.preprocess.assert_called_with("str", expected, 'custom-id-5000', extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxx8xx(api_mock):
    """chars maps SPLIT to '8' and passes '0' as the bpe codes id."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '8',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['chars', 'str', '-e', 'java', '--no-spaces'])
    api_mock.text.preprocess.assert_called_with("str", expected, '0', extension="java")
def test_xxA8xx():
    """chars with --no-str and --max-str-length is rejected by the CLI."""
    with pytest.raises(DocoptExit):
        parse_and_run(['chars', 'str', '-e', 'java', '--no-str', '--max-str-length=10'])
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxx1sx(api_mock):
    """basic --no-case without --no-spaces keeps TABS_NEWLINES at 's'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-case'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_xxx1xu(api_mock):
    """basic --no-spaces without --no-case keeps CASE at 'u'."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['basic', 'str', '-e', 'java', '--no-spaces'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_path(api_mock):
    """--path routes the call to the corpus API rather than the text API."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '0',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', '--path', PATH_TO_DATASET_STUB, '--no-spaces'])
    api_mock.corpus.preprocess_corpus.assert_called_with(PATH_TO_DATASET_STUB, expected, None, calc_vocab=False,
                                                         extensions=None, output_path=None)
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_path_short(api_mock):
    """-p behaves exactly like --path."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '0',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', '-p', PATH_TO_DATASET_STUB, '--no-spaces'])
    api_mock.corpus.preprocess_corpus.assert_called_with(PATH_TO_DATASET_STUB, expected, None, calc_vocab=False,
                                                         extensions=None, output_path=None)
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_output_and_vocab(api_mock):
    """--output-path and --calc-vocab are forwarded to the corpus API."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '0',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', '--path', PATH_TO_DATASET_STUB,
                   '--output-path', PATH_TO_OUTPUT_STUB, '--no-spaces', '--calc-vocab'])
    api_mock.corpus.preprocess_corpus.assert_called_with(PATH_TO_DATASET_STUB, expected, None,
                                                         calc_vocab=True, extensions=None,
                                                         output_path=PATH_TO_OUTPUT_STUB)
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_output_and_vocab_short(api_mock):
    """-o and -V behave exactly like --output-path and --calc-vocab."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: 'c',
                           PrepParam.STR: '1', PrepParam.SPLIT: '0',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'u'})
    parse_and_run(['nosplit', '--path', PATH_TO_DATASET_STUB,
                   '-o', PATH_TO_OUTPUT_STUB, '--no-spaces', '-V'])
    api_mock.corpus.preprocess_corpus.assert_called_with(PATH_TO_DATASET_STUB, expected, None,
                                                         calc_vocab=True, extensions=None,
                                                         output_path=PATH_TO_OUTPUT_STUB)
def test_output_with_text():
    """-o is only valid with --path/-p; combined with inline text it must fail."""
    argv = ['nosplit', 'str', '-o', PATH_TO_OUTPUT_STUB, '--no-spaces']
    # The former `as context` binding was never used; a bare raises() is enough.
    with pytest.raises(DocoptExit):
        parse_and_run(argv)
@mock.patch('codeprep.cli.impl.codeprep.api', autospec=True)
def test_all_short_config_options(api_mock):
    """The combined short flag -0lSCU sets every config dimension at once."""
    expected = PrepConfig({PrepParam.EN_ONLY: 'U', PrepParam.COM: '0',
                           PrepParam.STR: '0', PrepParam.SPLIT: '1',
                           PrepParam.TABS_NEWLINES: '0', PrepParam.CASE: 'l'})
    parse_and_run(['basic', 'str', '-e', 'java', '-0lSCU'])
    api_mock.text.preprocess.assert_called_with("str", expected, None, extension="java")
@mock.patch('codeprep.cli.impl.Dataset', autospec=True)
@mock.patch('codeprep.cli.impl.bpelearner', autospec=True)
@mock.patch('codeprep.pipeline.dataset.os.path.abspath', autospec=True)
def test_yes_false_java_yes(abspath_mock, bpe_learner_mock, dataset_mock):
    """learn-bpe --legacy: case kept, no word-end marker, 'java' base, unicode kept."""
    # given
    abspath_mock.return_value = PATH_TO_DATASET_STUB
    dataset_mock.create = Mock(spec=dataset_mock, return_value=dataset_mock)
    # when
    parse_and_run(['learn-bpe', '1000', '-p', PATH_TO_DATASET_STUB, '--legacy'])
    # then
    expected_prep = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: '0',
                                PrepParam.STR: 'E', PrepParam.SPLIT: 'F',
                                PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    expected_bpe = BpeConfig({BpeParam.CASE: 'yes', BpeParam.WORD_END: False,
                              BpeParam.BASE: 'java', BpeParam.UNICODE: 'yes'})
    dataset_mock.create.assert_called_with(PATH_TO_DATASET_STUB, expected_prep, 'java', None, expected_bpe)
    bpe_learner_mock.run.assert_called_with(dataset_mock, 1000, expected_bpe)
@mock.patch('codeprep.cli.impl.Dataset', autospec=True)
@mock.patch('codeprep.cli.impl.bpelearner', autospec=True)
@mock.patch('codeprep.pipeline.dataset.os.path.abspath', autospec=True)
def test_no_true_code_no(abspath_mock, bpe_learner_mock, dataset_mock):
    """learn-bpe --no-unicode --word-end: 'code' base, word-end on, unicode off."""
    # given
    abspath_mock.return_value = PATH_TO_DATASET_STUB
    dataset_mock.create = Mock(spec=dataset_mock, return_value=dataset_mock)
    # when
    parse_and_run(['learn-bpe', '1000', '-p', PATH_TO_DATASET_STUB, '--no-unicode', '--word-end'])
    # then
    expected_prep = PrepConfig({PrepParam.EN_ONLY: 'U', PrepParam.COM: '0',
                                PrepParam.STR: 'E', PrepParam.SPLIT: 'F',
                                PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    expected_bpe = BpeConfig({BpeParam.CASE: 'yes', BpeParam.WORD_END: True,
                              BpeParam.BASE: 'code', BpeParam.UNICODE: 'no'})
    dataset_mock.create.assert_called_with(PATH_TO_DATASET_STUB, expected_prep, None, None, expected_bpe)
    bpe_learner_mock.run.assert_called_with(dataset_mock, 1000, expected_bpe)
@mock.patch('codeprep.cli.impl.Dataset', autospec=True)
@mock.patch('codeprep.cli.impl.bpelearner', autospec=True)
@mock.patch('codeprep.pipeline.dataset.os.path.abspath', autospec=True)
def test_true_true_code_bytes(abspath_mock, bpe_learner_mock, dataset_mock):
    """learn-bpe --bytes --word-end: 'code' base, word-end on, byte-level unicode."""
    # given
    abspath_mock.return_value = PATH_TO_DATASET_STUB
    dataset_mock.create = Mock(spec=dataset_mock, return_value=dataset_mock)
    # when
    parse_and_run(['learn-bpe', '1000', '-p', PATH_TO_DATASET_STUB, '--bytes', '--word-end'])
    # then
    expected_prep = PrepConfig({PrepParam.EN_ONLY: 'u', PrepParam.COM: '0',
                                PrepParam.STR: 'E', PrepParam.SPLIT: 'F',
                                PrepParam.TABS_NEWLINES: 's', PrepParam.CASE: 'u'})
    expected_bpe = BpeConfig({BpeParam.CASE: 'yes', BpeParam.WORD_END: True,
                              BpeParam.BASE: 'code', BpeParam.UNICODE: 'bytes'})
    dataset_mock.create.assert_called_with(PATH_TO_DATASET_STUB, expected_prep, None, None, expected_bpe)
    bpe_learner_mock.run.assert_called_with(dataset_mock, 1000, expected_bpe)
| 34.121359
| 123
| 0.624887
| 2,684
| 21,087
| 4.697466
| 0.061103
| 0.052348
| 0.036643
| 0.048779
| 0.92703
| 0.92481
| 0.917195
| 0.907519
| 0.899508
| 0.882297
| 0
| 0.011601
| 0.207
| 21,087
| 618
| 124
| 34.121359
| 0.742375
| 0.006876
| 0
| 0.78865
| 0
| 0
| 0.150884
| 0.057907
| 0
| 0
| 0
| 0
| 0.07045
| 1
| 0.080235
| false
| 0
| 0.015656
| 0
| 0.09589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57a8b1d5a5b8d135cc9f90282c61669726d433c3
| 49,188
|
py
|
Python
|
download_genomes_ena/download_the_last_of_genomes.py
|
BioDragao/MAF
|
b20a39b474f2352920dae15273e633cea39b28d2
|
[
"MIT"
] | null | null | null |
download_genomes_ena/download_the_last_of_genomes.py
|
BioDragao/MAF
|
b20a39b474f2352920dae15273e633cea39b28d2
|
[
"MIT"
] | null | null | null |
download_genomes_ena/download_the_last_of_genomes.py
|
BioDragao/MAF
|
b20a39b474f2352920dae15273e633cea39b28d2
|
[
"MIT"
] | null | null | null |
import os
import json
import subprocess
import shutil
all_ftp_links = [
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR841/ERR841438/ERR841438.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR841/ERR841440/ERR841440.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR841/ERR841441/ERR841441.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR218/007/ERR2181457/ERR2181457.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR234/ERR234202/ERR234202_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR234/ERR234202/ERR234202_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR234/ERR234113/ERR234113_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR234/ERR234113/ERR234113_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR234/ERR234204/ERR234204_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR234/ERR234204/ERR234204_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR218/008/ERR2181458/ERR2181458_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR218/008/ERR2181458/ERR2181458_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/003/ERR1334053/ERR1334053_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/003/ERR1334053/ERR1334053_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/002/ERR1334052/ERR1334052_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/002/ERR1334052/ERR1334052_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/001/ERR1334051/ERR1334051_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/001/ERR1334051/ERR1334051_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/000/ERR1334050/ERR1334050_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/000/ERR1334050/ERR1334050_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/009/ERR1334049/ERR1334049_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR133/009/ERR1334049/ERR1334049_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/003/ERR1215483/ERR1215483_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/003/ERR1215483/ERR1215483_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/002/ERR1215482/ERR1215482_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/002/ERR1215482/ERR1215482_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/001/ERR1215481/ERR1215481_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/001/ERR1215481/ERR1215481_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/000/ERR1215480/ERR1215480_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/000/ERR1215480/ERR1215480_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/009/ERR1215479/ERR1215479_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/009/ERR1215479/ERR1215479_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/008/ERR1215478/ERR1215478_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/008/ERR1215478/ERR1215478_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/007/ERR1215477/ERR1215477_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/007/ERR1215477/ERR1215477_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/006/ERR1215476/ERR1215476_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/006/ERR1215476/ERR1215476_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/005/ERR1215475/ERR1215475_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/005/ERR1215475/ERR1215475_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082113/ERR1082113_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082113/ERR1082113_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/004/ERR1082114/ERR1082114_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/004/ERR1082114/ERR1082114_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/005/ERR1082115/ERR1082115_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/005/ERR1082115/ERR1082115_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/006/ERR1082116/ERR1082116_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/006/ERR1082116/ERR1082116_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/007/ERR1082117/ERR1082117_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/007/ERR1082117/ERR1082117_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/009/ERR1082119/ERR1082119_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/009/ERR1082119/ERR1082119_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/000/ERR1082120/ERR1082120_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/000/ERR1082120/ERR1082120_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/002/ERR1082122/ERR1082122_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/002/ERR1082122/ERR1082122_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082123/ERR1082123_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082123/ERR1082123_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/004/ERR1082124/ERR1082124_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/004/ERR1082124/ERR1082124_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/005/ERR1082125/ERR1082125_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/005/ERR1082125/ERR1082125_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/006/ERR1082126/ERR1082126_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/006/ERR1082126/ERR1082126_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/007/ERR1082127/ERR1082127_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/007/ERR1082127/ERR1082127_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/008/ERR1082128/ERR1082128_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/008/ERR1082128/ERR1082128_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/009/ERR1082129/ERR1082129_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/009/ERR1082129/ERR1082129_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/000/ERR1082130/ERR1082130_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/000/ERR1082130/ERR1082130_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/001/ERR1082131/ERR1082131_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/001/ERR1082131/ERR1082131_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/002/ERR1082132/ERR1082132_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/002/ERR1082132/ERR1082132_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082133/ERR1082133_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082133/ERR1082133_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/004/ERR1082134/ERR1082134_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/004/ERR1082134/ERR1082134_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/005/ERR1082135/ERR1082135_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/005/ERR1082135/ERR1082135_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/006/ERR1082136/ERR1082136_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/006/ERR1082136/ERR1082136_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/007/ERR1082137/ERR1082137_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/007/ERR1082137/ERR1082137_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/008/ERR1082138/ERR1082138_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/008/ERR1082138/ERR1082138_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/009/ERR1082139/ERR1082139_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/009/ERR1082139/ERR1082139_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/000/ERR1082140/ERR1082140_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/000/ERR1082140/ERR1082140_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/001/ERR1082141/ERR1082141_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/001/ERR1082141/ERR1082141_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/002/ERR1082142/ERR1082142_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/002/ERR1082142/ERR1082142_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082143/ERR1082143_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/003/ERR1082143/ERR1082143_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/003/ERR1203053/ERR1203053_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/003/ERR1203053/ERR1203053_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/004/ERR1203054/ERR1203054_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/004/ERR1203054/ERR1203054_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/005/ERR1203055/ERR1203055_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/005/ERR1203055/ERR1203055_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/004/ERR1215474/ERR1215474_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/004/ERR1215474/ERR1215474_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/006/ERR1203056/ERR1203056_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/006/ERR1203056/ERR1203056_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/007/ERR1203057/ERR1203057_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/007/ERR1203057/ERR1203057_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/008/ERR1203058/ERR1203058_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/008/ERR1203058/ERR1203058_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/009/ERR1203059/ERR1203059_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/009/ERR1203059/ERR1203059_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/001/ERR1203061/ERR1203061_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/001/ERR1203061/ERR1203061_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/002/ERR1203062/ERR1203062_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/002/ERR1203062/ERR1203062_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/005/ERR1203065/ERR1203065_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/005/ERR1203065/ERR1203065_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/006/ERR1203066/ERR1203066_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/006/ERR1203066/ERR1203066_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/008/ERR1203068/ERR1203068_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/008/ERR1203068/ERR1203068_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/009/ERR1203069/ERR1203069_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/009/ERR1203069/ERR1203069_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/000/ERR1203070/ERR1203070_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/000/ERR1203070/ERR1203070_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/002/ERR1203072/ERR1203072_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/002/ERR1203072/ERR1203072_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/004/ERR1203074/ERR1203074_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/004/ERR1203074/ERR1203074_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/006/ERR1203076/ERR1203076_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/006/ERR1203076/ERR1203076_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/007/ERR1203077/ERR1203077_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR120/007/ERR1203077/ERR1203077_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502471/ERR502471_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502471/ERR502471_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502475/ERR502475_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502475/ERR502475_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502476/ERR502476_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502476/ERR502476_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502477/ERR502477_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502477/ERR502477_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502480/ERR502480_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502480/ERR502480_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502487/ERR502487_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502487/ERR502487_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702401/ERR702401_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702401/ERR702401_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502494/ERR502494_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502494/ERR502494_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502500/ERR502500_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502500/ERR502500_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502501/ERR502501_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502501/ERR502501_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502502/ERR502502_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502502/ERR502502_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502503/ERR502503_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502503/ERR502503_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502505/ERR502505_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502505/ERR502505_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502506/ERR502506_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502506/ERR502506_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502507/ERR502507_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502507/ERR502507_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502508/ERR502508_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502508/ERR502508_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502509/ERR502509_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502509/ERR502509_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502512/ERR502512_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502512/ERR502512_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502513/ERR502513_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502513/ERR502513_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502514/ERR502514_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502514/ERR502514_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502515/ERR502515_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502515/ERR502515_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502516/ERR502516_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502516/ERR502516_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502517/ERR502517_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502517/ERR502517_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502521/ERR502521_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502521/ERR502521_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502522/ERR502522_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502522/ERR502522_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502523/ERR502523_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502523/ERR502523_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502525/ERR502525_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502525/ERR502525_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502527/ERR502527_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502527/ERR502527_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502528/ERR502528_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502528/ERR502528_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502530/ERR502530_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502530/ERR502530_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502533/ERR502533_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502533/ERR502533_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502534/ERR502534_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502534/ERR502534_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502536/ERR502536_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502536/ERR502536_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502537/ERR502537_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502537/ERR502537_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR517/ERR517471/ERR517471_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR517/ERR517471/ERR517471_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/003/ERR1215473/ERR1215473_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/003/ERR1215473/ERR1215473_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/002/ERR1215472/ERR1215472_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/002/ERR1215472/ERR1215472_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/001/ERR1215471/ERR1215471_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/001/ERR1215471/ERR1215471_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/000/ERR1215470/ERR1215470_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/000/ERR1215470/ERR1215470_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702407/ERR702407_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702407/ERR702407_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702408/ERR702408_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702408/ERR702408_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702409/ERR702409_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702409/ERR702409_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702410/ERR702410_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702410/ERR702410_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702411/ERR702411_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702411/ERR702411_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702412/ERR702412_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702412/ERR702412_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702413/ERR702413_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702413/ERR702413_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702414/ERR702414_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702414/ERR702414_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702415/ERR702415_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702415/ERR702415_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702416/ERR702416_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702416/ERR702416_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702417/ERR702417_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702417/ERR702417_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702418/ERR702418_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702418/ERR702418_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702419/ERR702419_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702419/ERR702419_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702420/ERR702420_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702420/ERR702420_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702421/ERR702421_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702421/ERR702421_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702422/ERR702422_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702422/ERR702422_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702423/ERR702423_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702423/ERR702423_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702424/ERR702424_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702424/ERR702424_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702425/ERR702425_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702425/ERR702425_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702426/ERR702426_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702426/ERR702426_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702427/ERR702427_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702427/ERR702427_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702428/ERR702428_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702428/ERR702428_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702430/ERR702430_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702430/ERR702430_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702431/ERR702431_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702431/ERR702431_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702432/ERR702432_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702432/ERR702432_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702434/ERR702434_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702434/ERR702434_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702435/ERR702435_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702435/ERR702435_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702436/ERR702436_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702436/ERR702436_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/009/ERR1215469/ERR1215469_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/009/ERR1215469/ERR1215469_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/008/ERR1215468/ERR1215468_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/008/ERR1215468/ERR1215468_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/007/ERR1215467/ERR1215467_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/007/ERR1215467/ERR1215467_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/006/ERR1215466/ERR1215466_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/006/ERR1215466/ERR1215466_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/005/ERR1215465/ERR1215465_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/005/ERR1215465/ERR1215465_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/004/ERR1215464/ERR1215464_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/004/ERR1215464/ERR1215464_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/003/ERR1215463/ERR1215463_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/003/ERR1215463/ERR1215463_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/002/ERR1215462/ERR1215462_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/002/ERR1215462/ERR1215462_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/001/ERR1215461/ERR1215461_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/001/ERR1215461/ERR1215461_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/000/ERR1215460/ERR1215460_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR121/000/ERR1215460/ERR1215460_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/001/ERR1082121/ERR1082121_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/001/ERR1082121/ERR1082121_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/008/ERR1082118/ERR1082118_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR108/008/ERR1082118/ERR1082118_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR845/ERR845916/ERR845916_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR845/ERR845916/ERR845916_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502498/ERR502498_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502498/ERR502498_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502497/ERR502497_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502497/ERR502497_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502496/ERR502496_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502496/ERR502496_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502493/ERR502493_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502493/ERR502493_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502492/ERR502492_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502492/ERR502492_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502490/ERR502490_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502490/ERR502490_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502489/ERR502489_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502489/ERR502489_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502486/ERR502486_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502486/ERR502486_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502485/ERR502485_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502485/ERR502485_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502484/ERR502484_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502484/ERR502484_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502483/ERR502483_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502483/ERR502483_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502482/ERR502482_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502482/ERR502482_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502481/ERR502481_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502481/ERR502481_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702400/ERR702400_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702400/ERR702400_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552588/ERR552588_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552588/ERR552588_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552963/ERR552963_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552963/ERR552963_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552172/ERR552172_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552172/ERR552172_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552187/ERR552187_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552187/ERR552187_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552021/ERR552021_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552021/ERR552021_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552328/ERR552328_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552328/ERR552328_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR551/ERR551617/ERR551617_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR551/ERR551617/ERR551617_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR868/ERR868539/ERR868539_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR868/ERR868539/ERR868539_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552506/ERR552506_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR552/ERR552506/ERR552506_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR909/ERR909754/ERR909754_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR909/ERR909754/ERR909754_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702429/ERR702429_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR702/ERR702429/ERR702429_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR909/ERR909753/ERR909753_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR909/ERR909753/ERR909753_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502532/ERR502532_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502532/ERR502532_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502531/ERR502531_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502531/ERR502531_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502524/ERR502524_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502524/ERR502524_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502511/ERR502511_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502511/ERR502511_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502510/ERR502510_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR502/ERR502510/ERR502510_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751290/ERR751290_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751290/ERR751290_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751291/ERR751291_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751291/ERR751291_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751292/ERR751292_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751292/ERR751292_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751293/ERR751293_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751293/ERR751293_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751294/ERR751294_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751294/ERR751294_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751295/ERR751295_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751295/ERR751295_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751296/ERR751296_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751296/ERR751296_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751297/ERR751297_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751297/ERR751297_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751298/ERR751298_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751298/ERR751298_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751299/ERR751299_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751299/ERR751299_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751300/ERR751300_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751300/ERR751300_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751301/ERR751301_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751301/ERR751301_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751302/ERR751302_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751302/ERR751302_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751303/ERR751303_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751303/ERR751303_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751304/ERR751304_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751304/ERR751304_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751305/ERR751305_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751305/ERR751305_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751306/ERR751306_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751306/ERR751306_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751307/ERR751307_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751307/ERR751307_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751308/ERR751308_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751308/ERR751308_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751309/ERR751309_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751309/ERR751309_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751310/ERR751310_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751310/ERR751310_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751311/ERR751311_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751311/ERR751311_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751312/ERR751312_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751312/ERR751312_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751313/ERR751313_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751313/ERR751313_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751314/ERR751314_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751314/ERR751314_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751315/ERR751315_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751315/ERR751315_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751319/ERR751319_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751319/ERR751319_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751320/ERR751320_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751320/ERR751320_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751321/ERR751321_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751321/ERR751321_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751322/ERR751322_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751322/ERR751322_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751323/ERR751323_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751323/ERR751323_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751324/ERR751324_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751324/ERR751324_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751327/ERR751327_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751327/ERR751327_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751328/ERR751328_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751328/ERR751328_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751329/ERR751329_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751329/ERR751329_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751330/ERR751330_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751330/ERR751330_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751331/ERR751331_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751331/ERR751331_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751332/ERR751332_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751332/ERR751332_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751333/ERR751333_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751333/ERR751333_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751334/ERR751334_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751334/ERR751334_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751335/ERR751335_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751335/ERR751335_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751336/ERR751336_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751336/ERR751336_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751337/ERR751337_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751337/ERR751337_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751338/ERR751338_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751338/ERR751338_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751339/ERR751339_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751339/ERR751339_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751341/ERR751341_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751341/ERR751341_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751342/ERR751342_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751342/ERR751342_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751343/ERR751343_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751343/ERR751343_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751344/ERR751344_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751344/ERR751344_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751345/ERR751345_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751345/ERR751345_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751346/ERR751346_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751346/ERR751346_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751348/ERR751348_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751348/ERR751348_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751349/ERR751349_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751349/ERR751349_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751347/ERR751347_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751347/ERR751347_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751326/ERR751326_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751326/ERR751326_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751325/ERR751325_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751325/ERR751325_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751318/ERR751318_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751318/ERR751318_2.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751317/ERR751317_1.fastq.gz",
"ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR751/ERR751317/ERR751317_2.fastq.gz",
]
completed_and_uploaded_genomes = [
"ERR234202_2.fastq.gz",
"ERR502475_1.fastq.gz",
"ERR502475_2.fastq.gz",
"ERR502476_1.fastq.gz",
"ERR502476_2.fastq.gz",
"ERR502477_1.fastq.gz",
"ERR502477_2.fastq.gz",
"ERR502480_1.fastq.gz",
"ERR502480_2.fastq.gz",
"ERR502481_1.fastq.gz",
"ERR502471_2.fastq.gz",
"ERR502482_1.fastq.gz",
"ERR502482_2.fastq.gz",
"ERR502483_1.fastq.gz",
"ERR502483_2.fastq.gz",
"ERR502484_1.fastq.gz",
"ERR502484_2.fastq.gz",
"ERR502485_1.fastq.gz",
"ERR502485_2.fastq.gz",
"ERR502486_1.fastq.gz",
"ERR234204_2.fastq.gz",
"ERR502471_1.fastq.gz",
"ERR234113_2.fastq.gz",
"ERR234113_1.fastq.gz",
"ERR234202_1.fastq.gz",
"ERR502481_2.fastq.gz",
"ERR234204_1.fastq.gz",
"ERR502487_2.fastq.gz",
"ERR502492_2.fastq.gz",
"ERR502486_2.fastq.gz",
"ERR502493_2.fastq.gz",
"ERR502494_1.fastq.gz",
"ERR502494_2.fastq.gz",
"ERR502496_1.fastq.gz",
"ERR502496_2.fastq.gz",
"ERR502497_1.fastq.gz",
"ERR502497_2.fastq.gz",
"ERR502498_1.fastq.gz",
"ERR502498_2.fastq.gz",
"ERR502489_1.fastq.gz",
"ERR502489_2.fastq.gz",
"ERR502490_1.fastq.gz",
"ERR502490_2.fastq.gz",
"ERR502493_1.fastq.gz",
"ERR502487_1.fastq.gz",
"ERR502492_1.fastq.gz",
"ERR502500_1.fastq.gz",
"ERR502501_2.fastq.gz",
"ERR502505_2.fastq.gz",
"ERR502506_1.fastq.gz",
"ERR502506_2.fastq.gz",
"ERR502507_1.fastq.gz",
"ERR502507_2.fastq.gz",
"ERR502508_1.fastq.gz",
"ERR502508_2.fastq.gz",
"ERR502509_1.fastq.gz",
"ERR502509_2.fastq.gz",
"ERR502510_1.fastq.gz",
"ERR502510_2.fastq.gz",
"ERR502511_1.fastq.gz",
"ERR502511_2.fastq.gz",
"ERR502512_1.fastq.gz",
"ERR502512_2.fastq.gz",
"ERR502513_1.fastq.gz",
"ERR502513_2.fastq.gz",
"ERR502514_1.fastq.gz",
"ERR502514_2.fastq.gz",
"ERR502515_1.fastq.gz",
"ERR502515_2.fastq.gz",
"ERR502502_2.fastq.gz",
"ERR502503_1.fastq.gz",
"ERR502503_2.fastq.gz",
"ERR502500_2.fastq.gz",
"ERR502501_1.fastq.gz",
"ERR502521_2.fastq.gz",
"ERR502522_1.fastq.gz",
"ERR502522_2.fastq.gz",
"ERR502523_1.fastq.gz",
"ERR502523_2.fastq.gz",
"ERR502524_1.fastq.gz",
"ERR502524_2.fastq.gz",
"ERR502525_1.fastq.gz",
"ERR502525_2.fastq.gz",
"ERR502527_1.fastq.gz",
"ERR502527_2.fastq.gz",
"ERR502528_1.fastq.gz",
"ERR502528_2.fastq.gz",
"ERR502530_1.fastq.gz",
"ERR502530_2.fastq.gz",
"ERR502531_1.fastq.gz",
"ERR502531_2.fastq.gz",
"ERR502532_1.fastq.gz",
"ERR502532_2.fastq.gz",
"ERR502533_1.fastq.gz",
"ERR502533_2.fastq.gz",
"ERR502534_1.fastq.gz",
"ERR502516_1.fastq.gz",
"ERR502516_2.fastq.gz",
"ERR502517_1.fastq.gz",
"ERR502537_1.fastq.gz",
"ERR502537_2.fastq.gz",
"ERR517471_1.fastq.gz",
"ERR517471_2.fastq.gz",
"ERR551617_1.fastq.gz",
"ERR551617_2.fastq.gz",
"ERR552021_1.fastq.gz",
"ERR552021_2.fastq.gz",
"ERR552172_1.fastq.gz",
"ERR552172_2.fastq.gz",
"ERR552187_1.fastq.gz",
"ERR552187_2.fastq.gz",
"ERR552328_1.fastq.gz",
"ERR552328_2.fastq.gz",
"ERR552506_1.fastq.gz",
"ERR552506_2.fastq.gz",
"ERR552588_1.fastq.gz",
"ERR552588_2.fastq.gz",
"ERR552963_1.fastq.gz",
"ERR552963_2.fastq.gz",
"ERR702400_1.fastq.gz",
"ERR702400_2.fastq.gz",
"ERR702401_1.fastq.gz",
"ERR702401_2.fastq.gz",
"ERR702407_1.fastq.gz",
"ERR702407_2.fastq.gz",
"ERR702408_1.fastq.gz",
"ERR702408_2.fastq.gz",
"ERR702409_1.fastq.gz",
"ERR702409_2.fastq.gz",
"ERR702410_1.fastq.gz",
"ERR702410_2.fastq.gz",
"ERR702411_1.fastq.gz",
"ERR702411_2.fastq.gz",
"ERR702412_1.fastq.gz",
"ERR702412_2.fastq.gz",
"ERR702413_1.fastq.gz",
"ERR702413_2.fastq.gz",
"ERR702414_1.fastq.gz",
"ERR702414_2.fastq.gz",
"ERR702415_1.fastq.gz",
"ERR702415_2.fastq.gz",
"ERR702416_1.fastq.gz",
"ERR702416_2.fastq.gz",
"ERR702417_1.fastq.gz",
"ERR702417_2.fastq.gz",
"ERR702418_1.fastq.gz",
"ERR702418_2.fastq.gz",
"ERR702419_1.fastq.gz",
"ERR702419_2.fastq.gz",
"ERR702420_1.fastq.gz",
"ERR702420_2.fastq.gz",
"ERR702421_1.fastq.gz",
"ERR702421_2.fastq.gz",
"ERR702422_1.fastq.gz",
"ERR702422_2.fastq.gz",
"ERR702423_1.fastq.gz",
"ERR702423_2.fastq.gz",
"ERR702424_1.fastq.gz",
"ERR702424_2.fastq.gz",
"ERR702425_1.fastq.gz",
"ERR702425_2.fastq.gz",
"ERR702426_1.fastq.gz",
"ERR702426_2.fastq.gz",
"ERR702427_1.fastq.gz",
"ERR702427_2.fastq.gz",
"ERR702428_1.fastq.gz",
"ERR702428_2.fastq.gz",
"ERR702429_1.fastq.gz",
"ERR702429_2.fastq.gz",
"ERR702430_1.fastq.gz",
"ERR702430_2.fastq.gz",
"ERR702431_1.fastq.gz",
"ERR702431_2.fastq.gz",
"ERR702432_1.fastq.gz",
"ERR502517_2.fastq.gz",
"ERR702434_1.fastq.gz",
"ERR702434_2.fastq.gz",
"ERR702435_1.fastq.gz",
"ERR702435_2.fastq.gz",
"ERR702436_1.fastq.gz",
"ERR702436_2.fastq.gz",
"ERR751290_1.fastq.gz",
"ERR751290_2.fastq.gz",
"ERR502505_1.fastq.gz",
"ERR502502_1.fastq.gz",
"ERR502521_1.fastq.gz",
"ERR502536_2.fastq.gz",
"ERR502534_2.fastq.gz",
"ERR502536_1.fastq.gz",
"ERR751294_1.fastq.gz",
"ERR751294_2.fastq.gz",
"ERR751291_2.fastq.gz",
"ERR751295_2.fastq.gz",
"ERR751296_1.fastq.gz",
"ERR751296_2.fastq.gz",
"ERR751297_1.fastq.gz",
"ERR751297_2.fastq.gz",
"ERR751298_1.fastq.gz",
"ERR751298_2.fastq.gz",
"ERR751299_1.fastq.gz",
"ERR751299_2.fastq.gz",
"ERR751293_1.fastq.gz",
"ERR751291_1.fastq.gz",
"ERR751292_2.fastq.gz",
"ERR751301_2.fastq.gz",
"ERR751302_1.fastq.gz",
"ERR751302_2.fastq.gz",
"ERR751303_1.fastq.gz",
"ERR751303_2.fastq.gz",
"ERR751304_1.fastq.gz",
"ERR751304_2.fastq.gz",
"ERR751305_1.fastq.gz",
"ERR751305_2.fastq.gz",
"ERR751306_1.fastq.gz",
"ERR751306_2.fastq.gz",
"ERR751307_1.fastq.gz",
"ERR751307_2.fastq.gz",
"ERR751308_1.fastq.gz",
"ERR751308_2.fastq.gz",
"ERR751309_1.fastq.gz",
"ERR751309_2.fastq.gz",
"ERR751301_1.fastq.gz",
"ERR751310_2.fastq.gz",
"ERR751311_1.fastq.gz",
"ERR751311_2.fastq.gz",
"ERR751312_1.fastq.gz",
"ERR751312_2.fastq.gz",
"ERR751313_1.fastq.gz",
"ERR702432_2.fastq.gz",
"ERR751300_1.fastq.gz",
"ERR751300_2.fastq.gz",
"ERR751293_2.fastq.gz",
"ERR751315_2.fastq.gz",
"ERR751317_1.fastq.gz",
"ERR751317_2.fastq.gz",
"ERR751318_1.fastq.gz",
"ERR751318_2.fastq.gz",
"ERR751319_1.fastq.gz",
"ERR751295_1.fastq.gz",
"ERR751292_1.fastq.gz",
"ERR751310_1.fastq.gz",
"ERR751314_1.fastq.gz",
"ERR751313_2.fastq.gz",
"ERR751314_2.fastq.gz",
"ERR751319_2.fastq.gz",
"ERR751315_1.fastq.gz",
"ERR751320_2.fastq.gz",
"ERR751320_1.fastq.gz",
"ERR751321_1.fastq.gz",
"ERR751321_2.fastq.gz",
"ERR751322_1.fastq.gz",
"ERR751326_1.fastq.gz",
"ERR751326_2.fastq.gz",
"ERR751327_1.fastq.gz",
"ERR751327_2.fastq.gz",
"ERR751328_1.fastq.gz",
"ERR751328_2.fastq.gz",
"ERR751329_1.fastq.gz",
"ERR751329_2.fastq.gz",
"ERR751330_1.fastq.gz",
"ERR751330_2.fastq.gz",
"ERR751324_1.fastq.gz",
"ERR751322_2.fastq.gz",
"ERR751323_1.fastq.gz",
"ERR751323_2.fastq.gz",
"ERR751325_1.fastq.gz",
"ERR751324_2.fastq.gz",
"ERR751325_2.fastq.gz",
"ERR751332_2.fastq.gz",
"ERR751335_1.fastq.gz",
"ERR751335_2.fastq.gz",
"ERR751336_1.fastq.gz",
"ERR751336_2.fastq.gz",
"ERR751337_1.fastq.gz",
"ERR751337_2.fastq.gz",
"ERR751338_1.fastq.gz",
"ERR751338_2.fastq.gz",
"ERR751339_1.fastq.gz",
"ERR751339_2.fastq.gz",
"ERR751341_1.fastq.gz",
"ERR751341_2.fastq.gz",
"ERR751342_1.fastq.gz",
"ERR751331_1.fastq.gz",
"ERR751331_2.fastq.gz",
"ERR751332_1.fastq.gz",
"ERR751333_2.fastq.gz",
"ERR751333_1.fastq.gz",
"ERR751334_1.fastq.gz",
"ERR751345_2.fastq.gz",
"ERR751346_1.fastq.gz",
"ERR751346_2.fastq.gz",
"ERR751347_1.fastq.gz",
"ERR751347_2.fastq.gz",
"ERR751348_1.fastq.gz",
"ERR751348_2.fastq.gz",
"ERR751349_1.fastq.gz",
"ERR751349_2.fastq.gz",
"ERR841438.fastq.gz",
"ERR841440.fastq.gz",
"ERR841441.fastq.gz",
"ERR845916_1.fastq.gz",
"ERR845916_2.fastq.gz",
"ERR868539_1.fastq.gz",
"ERR868539_2.fastq.gz",
"ERR909753_1.fastq.gz",
"ERR909753_2.fastq.gz",
"ERR909754_1.fastq.gz",
"ERR909754_2.fastq.gz",
"ERR1082113_1.fastq.gz",
"ERR1082113_2.fastq.gz",
"ERR1082114_1.fastq.gz",
"ERR1082114_2.fastq.gz",
"ERR1082115_1.fastq.gz",
"ERR1082115_2.fastq.gz",
"ERR1082116_1.fastq.gz",
"ERR1082116_2.fastq.gz",
"ERR1082117_1.fastq.gz",
"ERR1082117_2.fastq.gz",
"ERR1082118_1.fastq.gz",
"ERR1082118_2.fastq.gz",
"ERR1082119_1.fastq.gz",
"ERR1082119_2.fastq.gz",
"ERR1082120_1.fastq.gz",
"ERR1082120_2.fastq.gz",
"ERR1082121_1.fastq.gz",
"ERR1082121_2.fastq.gz",
"ERR751343_1.fastq.gz",
"ERR751344_2.fastq.gz",
"ERR751344_1.fastq.gz",
"ERR751345_1.fastq.gz",
"ERR751343_2.fastq.gz",
"ERR751334_2.fastq.gz",
"ERR751342_2.fastq.gz",
"ERR1082123_1.fastq.gz",
"ERR1082126_1.fastq.gz",
"ERR1082126_2.fastq.gz",
"ERR1082127_1.fastq.gz",
"ERR1082127_2.fastq.gz",
"ERR1082128_1.fastq.gz",
"ERR1082128_2.fastq.gz",
"ERR1082129_1.fastq.gz",
"ERR1082129_2.fastq.gz",
"ERR1082130_1.fastq.gz",
"ERR1082130_2.fastq.gz",
"ERR1082131_1.fastq.gz",
"ERR1082131_2.fastq.gz",
"ERR1082132_1.fastq.gz",
"ERR1082132_2.fastq.gz",
"ERR1082133_1.fastq.gz",
"ERR1082133_2.fastq.gz",
"ERR1082134_1.fastq.gz",
"ERR1082123_2.fastq.gz",
"ERR1082135_1.fastq.gz",
"ERR1082135_2.fastq.gz",
"ERR1082136_1.fastq.gz",
"ERR1082136_2.fastq.gz",
"ERR1082137_1.fastq.gz",
"ERR1082124_1.fastq.gz",
"ERR1082124_2.fastq.gz",
"ERR1082125_1.fastq.gz",
"ERR1082122_1.fastq.gz",
"ERR1082122_2.fastq.gz",
"ERR1082125_2.fastq.gz",
"ERR1082134_2.fastq.gz",
"ERR1082141_1.fastq.gz",
"ERR1082141_2.fastq.gz",
"ERR1082138_2.fastq.gz",
"ERR1082142_2.fastq.gz",
"ERR1082143_1.fastq.gz",
"ERR1082143_2.fastq.gz",
"ERR1203053_1.fastq.gz",
"ERR1203053_2.fastq.gz",
"ERR1203054_1.fastq.gz",
"ERR1203054_2.fastq.gz",
"ERR1203055_1.fastq.gz",
"ERR1203055_2.fastq.gz",
"ERR1203056_1.fastq.gz",
"ERR1082139_1.fastq.gz",
"ERR1082137_2.fastq.gz",
"ERR1082138_1.fastq.gz",
"ERR1082142_1.fastq.gz",
"ERR1082140_1.fastq.gz",
"ERR1082139_2.fastq.gz",
"ERR1082140_2.fastq.gz",
"ERR1203061_1.fastq.gz",
"ERR1203061_2.fastq.gz",
"ERR1203062_1.fastq.gz",
"ERR1203062_2.fastq.gz",
"ERR1203065_1.fastq.gz",
"ERR1203065_2.fastq.gz",
"ERR1203066_1.fastq.gz",
"ERR1203066_2.fastq.gz",
"ERR1203068_1.fastq.gz",
"ERR1203068_2.fastq.gz",
"ERR1203058_1.fastq.gz",
"ERR1203056_2.fastq.gz",
"ERR1203057_1.fastq.gz",
"ERR1203070_2.fastq.gz",
"ERR1203072_1.fastq.gz",
"ERR1203069_2.fastq.gz",
"ERR1203074_1.fastq.gz",
"ERR1203074_2.fastq.gz",
"ERR1203076_1.fastq.gz",
"ERR1203076_2.fastq.gz",
"ERR1203077_1.fastq.gz",
"ERR1203077_2.fastq.gz",
"ERR1215460_1.fastq.gz",
"ERR1215460_2.fastq.gz",
"ERR1215461_1.fastq.gz",
"ERR1215461_2.fastq.gz",
"ERR1215462_1.fastq.gz",
"ERR1215462_2.fastq.gz",
"ERR1215463_1.fastq.gz",
"ERR1215463_2.fastq.gz",
"ERR1215464_1.fastq.gz",
"ERR1215464_2.fastq.gz",
"ERR1215465_1.fastq.gz",
"ERR1215465_2.fastq.gz",
"ERR1215466_1.fastq.gz",
"ERR1203058_2.fastq.gz",
"ERR1203059_2.fastq.gz",
"ERR1203070_1.fastq.gz",
"ERR1203069_1.fastq.gz",
"ERR1203072_2.fastq.gz",
"ERR1203057_2.fastq.gz",
"ERR1203059_1.fastq.gz",
"ERR1215467_2.fastq.gz",
"ERR1215470_2.fastq.gz",
"ERR1215471_1.fastq.gz",
"ERR1215470_1.fastq.gz",
"ERR1215472_1.fastq.gz",
"ERR1215472_2.fastq.gz",
"ERR1215473_1.fastq.gz",
"ERR1215473_2.fastq.gz",
"ERR1215474_1.fastq.gz",
"ERR1215474_2.fastq.gz",
"ERR1215475_1.fastq.gz",
"ERR1215475_2.fastq.gz",
"ERR1215471_2.fastq.gz",
"ERR1215468_1.fastq.gz",
"ERR1215468_2.fastq.gz",
"ERR1215469_1.fastq.gz",
"ERR1215469_2.fastq.gz",
"ERR1215466_2.fastq.gz",
"ERR1215467_1.fastq.gz",
"ERR1215479_2.fastq.gz",
"ERR1215480_1.fastq.gz",
"ERR1215480_2.fastq.gz",
"ERR1215481_1.fastq.gz",
"ERR1215481_2.fastq.gz",
"ERR1215482_1.fastq.gz",
"ERR1215482_2.fastq.gz",
"ERR1215483_1.fastq.gz",
"ERR1215483_2.fastq.gz",
"ERR1334049_1.fastq.gz",
"ERR1215477_2.fastq.gz",
"ERR1334050_1.fastq.gz",
"ERR1334050_2.fastq.gz",
"ERR1334051_1.fastq.gz",
"ERR1334049_2.fastq.gz",
"ERR1334052_1.fastq.gz",
"ERR1334052_2.fastq.gz",
"ERR1334053_1.fastq.gz",
"ERR1334053_2.fastq.gz",
"ERR2181457.fastq.gz",
"ERR2181458_1.fastq.gz",
"ERR2181458_2.fastq.gz",
"ERR1215476_2.fastq.gz",
"ERR1215477_1.fastq.gz",
"ERR1215478_2.fastq.gz",
"ERR1215478_1.fastq.gz",
"ERR1215479_1.fastq.gz",
"ERR1334051_2.fastq.gz",
"ERR1215476_1.fastq.gz"
]
# Work out which genomes still need to be downloaded: every entry of
# all_ftp_links whose trailing file name is not already present in
# completed_and_uploaded_genomes.
all_genome_names = [link.split("/")[-1] for link in all_ftp_links]

# Set difference yields the outstanding file names.  Order of this list is
# unspecified (it comes from a set), exactly as in the original computation.
_remaining_names = set(all_genome_names) - set(completed_and_uploaded_genomes)
all_remaining_genome_names = list(_remaining_names)

# Map the remaining names back to their FTP URLs in a single pass with O(1)
# set-membership tests; the previous nested loop re-split every URL for every
# remaining name (O(n * m)).  Resulting URLs follow all_ftp_links order.
ftp_of_all_remaining_genomes = [
    link for link in all_ftp_links
    if link.split("/")[-1] in _remaining_names
]
| 49.534743
| 94
| 0.761629
| 8,784
| 49,188
| 4.153233
| 0.035861
| 0.181898
| 0.116934
| 0.155913
| 0.76191
| 0.759936
| 0.759936
| 0.759936
| 0.759936
| 0.759087
| 0
| 0.256099
| 0.021672
| 49,188
| 992
| 95
| 49.584677
| 0.502016
| 0.003375
| 0
| 0
| 0
| 0.492212
| 0.912164
| 0.783318
| 0
| 0
| 0
| 0.001008
| 0
| 1
| 0
| false
| 0
| 0.004154
| 0
| 0.004154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
57e3d55853ea296a5a212b0334893253d563bc61
| 8,031
|
py
|
Python
|
unittests/test_property_dictionary.py
|
USGS-EROS/python-property-dictionary
|
476a1c9d9ab52dea13629e8f236fd6604125282b
|
[
"Unlicense"
] | 3
|
2020-06-27T04:17:14.000Z
|
2021-04-16T15:02:50.000Z
|
unittests/test_property_dictionary.py
|
USGS-EROS/python-property-dictionary
|
476a1c9d9ab52dea13629e8f236fd6604125282b
|
[
"Unlicense"
] | 1
|
2017-05-01T15:51:50.000Z
|
2017-05-01T15:51:50.000Z
|
unittests/test_property_dictionary.py
|
USGS-EROS/python-property-dictionary
|
476a1c9d9ab52dea13629e8f236fd6604125282b
|
[
"Unlicense"
] | 6
|
2016-04-26T19:28:16.000Z
|
2017-06-25T16:39:59.000Z
|
import unittest
from espa import PropertyDict
class TestPropertyDict(unittest.TestCase):
    """Unit tests for PropertyDict's dual attribute/item access.

    PropertyDict is expected to expose every mapping key both as an
    attribute (``x.key``) and as an item (``x['key']``), recursively
    wrapping nested dicts and dicts inside lists, and to return ``None``
    for missing attributes instead of raising AttributeError.
    """

    def setUp(self):
        # Fixture mappings shared by the tests: dict_a mixes scalars,
        # a list of scalars and a list of dicts; dict_b nests dicts.
        self.dict_a = {'a': 1,
                       'b': 2,
                       'c': [3, 4, 5],
                       'd': [{'a': 1}, {'b': 2}]}
        self.dict_b = {'z': {'a': 7}, 'zz': {'b': 8}}

    def tearDown(self):
        pass

    def _assert_dict_a_contents(self, node):
        """Assert *node* exposes dict_a's values via both access styles."""
        for key, expected in (('a', 1), ('b', 2)):
            self.assertEqual(getattr(node, key), expected)
            self.assertEqual(node[key], expected)
        for i, expected in enumerate([3, 4, 5]):
            self.assertEqual(node.c[i], expected)
            self.assertEqual(node['c'][i], expected)
        # Dicts inside lists are wrapped too, so every combination of
        # attribute and item access must work at both levels.
        self.assertEqual(node.d[0].a, 1)
        self.assertEqual(node.d[1].b, 2)
        self.assertEqual(node.d[0]['a'], 1)
        self.assertEqual(node.d[1]['b'], 2)
        self.assertEqual(node['d'][0].a, 1)
        self.assertEqual(node['d'][1].b, 2)
        self.assertEqual(node['d'][0]['a'], 1)
        self.assertEqual(node['d'][1]['b'], 2)

    def _assert_dict_b_contents(self, node):
        """Assert *node* exposes dict_b's nested values via both styles."""
        self.assertEqual(node.z.a, 7)
        self.assertEqual(node.zz.b, 8)
        self.assertEqual(node.z['a'], 7)
        self.assertEqual(node.zz['b'], 8)
        self.assertEqual(node['z'].a, 7)
        self.assertEqual(node['zz'].b, 8)
        self.assertEqual(node['z']['a'], 7)
        self.assertEqual(node['zz']['b'], 8)

    def test_dict_with_lists(self):
        """Test dictionary with lists of dicts and general access"""
        x = PropertyDict(self.dict_a)  # This performs a deep copy
        self._assert_dict_a_contents(x)
        # A non-existent attribute reads as None rather than raising.
        self.assertIsNone(x.rock)
        # Add attributes on the fly, via attribute and item notation.
        x.rock = [1, 2, 3]
        x['rock2'] = [1, 2, 3]
        for name in ('rock', 'rock2'):
            for i, expected in enumerate([1, 2, 3]):
                self.assertEqual(getattr(x, name)[i], expected)
                self.assertEqual(x[name][i], expected)
        # A list of dictionaries added on the fly is wrapped as well.
        x.rocky = [{'a': 4}, {'b': 5}, {'c': 6}]
        for i, (key, expected) in enumerate((('a', 4), ('b', 5), ('c', 6))):
            self.assertEqual(getattr(x.rocky[i], key), expected)
            self.assertEqual(x.rocky[i][key], expected)
            self.assertEqual(getattr(x['rocky'][i], key), expected)
            self.assertEqual(x['rocky'][i][key], expected)
        # Deleting an attribute makes subsequent reads return None again.
        del x.a
        self.assertIsNone(x.a)

    def test_dict_with_dicts(self):
        """Test dictionaries with dictionaries"""
        x = PropertyDict(self.dict_b)  # This performs a deep copy
        self._assert_dict_b_contents(x)

    def test_parse(self):
        """Test the parse class method"""
        x = PropertyDict.parse(self.dict_b)  # This performs a deep copy
        self._assert_dict_b_contents(x)

    def test_empty(self):
        """Start with an empty object"""
        x = PropertyDict()
        self.assertIsNone(x.couch)
        self.assertIsNone(x.potato)
        x.couch = self.dict_b
        x.potato = self.dict_a
        # Each top-level access style must hand back a fully wrapped node.
        self._assert_dict_b_contents(x.couch)
        self._assert_dict_b_contents(x['couch'])
        self._assert_dict_a_contents(x.potato)
        self._assert_dict_a_contents(x['potato'])

    def test_args_and_kwargs(self):
        """Test using args and kwargs to create"""
        x = PropertyDict(self.dict_b,
                         self.dict_a,
                         peter={'d': 20},
                         pan=[40, 41, 42])
        # Positional mappings are merged into the top level...
        self._assert_dict_b_contents(x)
        self._assert_dict_a_contents(x)
        # ...and keyword arguments become wrapped top-level entries.
        self.assertEqual(x.peter.d, 20)
        self.assertEqual(x.peter['d'], 20)
        self.assertEqual(x['peter'].d, 20)
        self.assertEqual(x['peter']['d'], 20)
        for i, expected in enumerate([40, 41, 42]):
            self.assertEqual(x.pan[i], expected)
            self.assertEqual(x['pan'][i], expected)
# Allow running this test module directly; verbosity=2 prints one line
# per test method as it runs.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 35.378855
| 72
| 0.524841
| 1,198
| 8,031
| 3.494157
| 0.073456
| 0.523172
| 0.558051
| 0.189202
| 0.824176
| 0.815815
| 0.807215
| 0.807215
| 0.802198
| 0.794553
| 0
| 0.046864
| 0.261362
| 8,031
| 226
| 73
| 35.535398
| 0.6588
| 0.060266
| 0
| 0.32967
| 0
| 0
| 0.048196
| 0
| 0
| 0
| 0
| 0
| 0.824176
| 1
| 0.038462
| false
| 0.005495
| 0.010989
| 0
| 0.054945
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
aa2acb25c40a8d0ecedda30f94dfb8ac746a886f
| 2,743
|
py
|
Python
|
LR_3/main.py
|
Gr1dlock/AnalysisOfAlgorithms
|
90da2704e9147e1c620f5476e3e1d7ac7cdd72ec
|
[
"MIT"
] | 1
|
2021-08-30T11:20:45.000Z
|
2021-08-30T11:20:45.000Z
|
LR_3/main.py
|
Gr1dlock/AnalysisOfAlgorithms
|
90da2704e9147e1c620f5476e3e1d7ac7cdd72ec
|
[
"MIT"
] | null | null | null |
LR_3/main.py
|
Gr1dlock/AnalysisOfAlgorithms
|
90da2704e9147e1c620f5476e3e1d7ac7cdd72ec
|
[
"MIT"
] | null | null | null |
import sorting
import time
import random
def time_count():
    """Benchmark insertion, comb and quick sort on three input classes.

    For array lengths 100..1000 (step 100), each sort from the project's
    ``sorting`` module is timed over 100 repetitions on random,
    already-sorted and reverse-sorted integer arrays, and the mean
    wall-clock time per call is printed for each combination.
    """
    runs = 100

    def _benchmark(header, make_array):
        # One results table per input class: for each length, time all
        # three sorts back-to-back `runs` times and print the averages.
        for length in range(100, 1100, 100):
            insertion_time = 0
            comb_time = 0
            quick_time = 0
            arr = make_array(length)
            for _ in range(runs):
                t0 = time.perf_counter()
                sorting.insertion(arr)
                t1 = time.perf_counter()
                sorting.comb(arr)
                t2 = time.perf_counter()
                sorting.quick(arr)
                t3 = time.perf_counter()
                # Consecutive perf_counter readings bracket each sort, so
                # one pass yields all three measurements.
                insertion_time += t1 - t0
                comb_time += t2 - t1
                quick_time += t3 - t2
            print(header, length)
            print('Insertion sort: {:7f}'.format(insertion_time / runs))
            print('Comb sort: {:7f}'.format(comb_time / runs))
            print('Quick sort: {:7f}'.format(quick_time / runs))
            print()

    _benchmark('Random array length: ',
               lambda n: [random.randint(0, 1000) for _ in range(n)])
    _benchmark('Sorted array length: ',
               lambda n: list(range(n)))
    _benchmark('Inverse array length: ',
               lambda n: list(range(n, 0, -1)))
# Smoke test when run as a script: apply each sort to a trivial
# one-element array and print whatever each sort returns.
# NOTE(review): time_count() is defined above but never invoked here —
# presumably called manually; confirm whether it should run by default.
if __name__ == '__main__':
    arr = [1]
    res = sorting.insertion(arr)
    print(res)
    res = sorting.comb(arr)
    print(res)
    res = sorting.quick(arr)
    print(res)
| 33.45122
| 67
| 0.543565
| 361
| 2,743
| 3.914127
| 0.108033
| 0.067941
| 0.127389
| 0.11465
| 0.901628
| 0.871904
| 0.871904
| 0.871904
| 0.871904
| 0.871904
| 0
| 0.067107
| 0.337222
| 2,743
| 81
| 68
| 33.864198
| 0.710121
| 0
| 0
| 0.84
| 0
| 0
| 0.085339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013333
| false
| 0
| 0.04
| 0
| 0.053333
| 0.24
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
104bb466e66f85ea53446abecee46780c09544d4
| 4,856
|
py
|
Python
|
cipefilos/api/migrations/0001_initial.py
|
JesusJimenezG/cipefilos
|
537c80a919a324d1c3d082aa089a590aa742fece
|
[
"MIT"
] | null | null | null |
cipefilos/api/migrations/0001_initial.py
|
JesusJimenezG/cipefilos
|
537c80a919a324d1c3d082aa089a590aa742fece
|
[
"MIT"
] | null | null | null |
cipefilos/api/migrations/0001_initial.py
|
JesusJimenezG/cipefilos
|
537c80a919a324d1c3d082aa089a590aa742fece
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.9 on 2020-08-17 22:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `api` app (auto-generated by Django 3.0.9).

    NOTE(review): this migration has presumably already been applied;
    only comments may be edited here — changing any operation would make
    existing databases diverge from the migration history.
    """

    # First migration of this app.
    initial = True

    # Needed because PeliculaFavorita below references the (swappable)
    # configured user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Actors: biographic data plus a portrait URL (defaults to the
        # IMDb "no picture" placeholder).
        migrations.CreateModel(
            name='Actores',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=150)),
                ('nacimiento', models.DateField()),
                ('nacionalidad', models.CharField(max_length=150)),
                ('imagen', models.URLField(default='https://m.media-amazon.com/images/G/01/imdb/images/nopicture/medium/name-2135195744._CB466677935_.png', help_text='De imdb mismo')),
            ],
            options={
                'ordering': ['nombre'],
            },
        ),
        # Casting: through-table linking actors (and, via the AddField
        # operations below, directors/movies/series).
        migrations.CreateModel(
            name='Casting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('actores', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='api.Actores')),
            ],
        ),
        # Directors: same shape as Actores.
        migrations.CreateModel(
            name='Directores',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=150)),
                ('nacimiento', models.DateField()),
                ('nacionalidad', models.CharField(max_length=150)),
                ('imagen', models.URLField(default='https://m.media-amazon.com/images/G/01/imdb/images/nopicture/medium/name-2135195744._CB466677935_.png', help_text='De imdb mismo')),
            ],
            options={
                'ordering': ['nombre'],
            },
        ),
        # Movies: many-to-many to actors and directors through Casting.
        migrations.CreateModel(
            name='Pelicula',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=150)),
                ('estreno', models.IntegerField(default=2000)),
                ('imagen', models.URLField(help_text='De imdb mismo')),
                ('resumen', models.TextField(help_text='Descripción corta')),
                ('actores', models.ManyToManyField(default=None, through='api.Casting', to='api.Actores')),
                ('director', models.ManyToManyField(default=None, through='api.Casting', to='api.Directores')),
            ],
            options={
                'ordering': ['titulo'],
            },
        ),
        # TV series: like Pelicula plus season/episode counts.
        migrations.CreateModel(
            name='Series',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=150)),
                ('estreno', models.IntegerField(default=2000)),
                ('temporadas', models.IntegerField(default=1)),
                ('episodios', models.IntegerField(default=1)),
                ('imagen', models.URLField(help_text='De imdb mismo')),
                ('resumen', models.TextField(help_text='Descripción corta')),
                ('actores', models.ManyToManyField(default=None, through='api.Casting', to='api.Actores')),
                ('director', models.ManyToManyField(default=None, through='api.Casting', to='api.Directores')),
            ],
            options={
                'ordering': ['titulo'],
            },
        ),
        # A user's favorite movie (user FK points at the swappable model).
        migrations.CreateModel(
            name='PeliculaFavorita',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pelicula', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Pelicula')),
                ('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Remaining Casting FKs are added after the target models exist.
        migrations.AddField(
            model_name='casting',
            name='directores',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='api.Directores'),
        ),
        migrations.AddField(
            model_name='casting',
            name='pelicula',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='api.Pelicula'),
        ),
        migrations.AddField(
            model_name='casting',
            name='serie',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='api.Series'),
        ),
    ]
| 45.811321
| 184
| 0.5729
| 464
| 4,856
| 5.892241
| 0.224138
| 0.016459
| 0.035845
| 0.056328
| 0.815655
| 0.815655
| 0.773958
| 0.773958
| 0.773958
| 0.773958
| 0
| 0.024327
| 0.280478
| 4,856
| 105
| 185
| 46.247619
| 0.758157
| 0.009267
| 0
| 0.683673
| 1
| 0.020408
| 0.168434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030612
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1075843803867449faa59b2c40f23936cf808aa7
| 212
|
py
|
Python
|
tests/interfaces/test_hashdf.py
|
Ovakefali13/pyiron_base
|
84a9b0c46dab434a7f7c14f987392f6fede68317
|
[
"BSD-3-Clause"
] | 7
|
2020-09-12T11:01:09.000Z
|
2022-03-01T20:59:46.000Z
|
tests/interfaces/test_hashdf.py
|
Ovakefali13/pyiron_base
|
84a9b0c46dab434a7f7c14f987392f6fede68317
|
[
"BSD-3-Clause"
] | 417
|
2018-07-03T12:44:00.000Z
|
2022-03-31T14:25:31.000Z
|
tests/interfaces/test_hashdf.py
|
Ovakefali13/pyiron_base
|
84a9b0c46dab434a7f7c14f987392f6fede68317
|
[
"BSD-3-Clause"
] | 8
|
2018-04-03T05:21:07.000Z
|
2021-12-27T09:55:19.000Z
|
import pyiron_base.interfaces.has_hdf
from pyiron_base._tests import PyironTestCase
class TestHasHDF(PyironTestCase):
    """Docstring/doctest check for the has_hdf interface module."""

    # PyironTestCase presumably inspects `docstring_module` to decide which
    # module's docstrings to validate — confirm against the base class.
    @property
    def docstring_module(self):
        """Module whose docstrings the base test case should check."""
        return pyiron_base.interfaces.has_hdf
| 23.555556
| 45
| 0.79717
| 26
| 212
| 6.230769
| 0.653846
| 0.185185
| 0.246914
| 0.283951
| 0.320988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146226
| 212
| 8
| 46
| 26.5
| 0.895028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
5e0e874186f22a21f39d2a9daeb6b730c266f15f
| 127
|
py
|
Python
|
pyramda/relation/min_test.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 124
|
2015-07-30T21:34:25.000Z
|
2022-02-19T08:45:50.000Z
|
pyramda/relation/min_test.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 37
|
2015-08-31T23:02:20.000Z
|
2022-02-04T04:45:28.000Z
|
pyramda/relation/min_test.py
|
sergiors/pyramda
|
5bf200888809b1bc946e813e29460f204bccd13e
|
[
"MIT"
] | 20
|
2015-08-04T18:59:09.000Z
|
2021-12-13T08:08:59.000Z
|
from .min import min
from pyramda.private.asserts import assert_equal
def min_test():
    """min() of an unsorted integer list returns its smallest element."""
    smallest = min([3, 1, 4, 2])
    assert_equal(smallest, 1)
| 18.142857
| 48
| 0.708661
| 22
| 127
| 3.954545
| 0.636364
| 0.252874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04717
| 0.165354
| 127
| 6
| 49
| 21.166667
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
eaf0425b0c373044f4b5badd325f4afc4795d0e8
| 321
|
py
|
Python
|
projeto_e_exemplos/configs.py
|
luizanisio/PesquisaTextualBR
|
1e423901fef83c59dea9b20dcc404ef90c3e781f
|
[
"MIT"
] | null | null | null |
projeto_e_exemplos/configs.py
|
luizanisio/PesquisaTextualBR
|
1e423901fef83c59dea9b20dcc404ef90c3e781f
|
[
"MIT"
] | null | null | null |
projeto_e_exemplos/configs.py
|
luizanisio/PesquisaTextualBR
|
1e423901fef83c59dea9b20dcc404ef90c3e781f
|
[
"MIT"
] | null | null | null |
# NOTE(security): database credentials are hard-coded in source control.
# Move them to environment variables or a secrets manager before real use.
# Dev and prod currently share identical settings, so both configs are
# built from a single template (independent copies, safe to mutate).
_DB_CONFIG_BASE = {
    'host': 'host',
    'usuario': 'usr_pesquisabr',
    'senha': 'senhapesquisabr',
    'database': 'pesquisabr',
}

# Development database connection settings.
DB_CONFIG_DEV = dict(_DB_CONFIG_BASE)

# Production database connection settings.
DB_CONFIG_PROD = dict(_DB_CONFIG_BASE)
| 29.181818
| 41
| 0.501558
| 24
| 321
| 6.458333
| 0.458333
| 0.103226
| 0.193548
| 0.232258
| 0.851613
| 0.851613
| 0.851613
| 0.851613
| 0.851613
| 0
| 0
| 0
| 0.336449
| 321
| 10
| 42
| 32.1
| 0.7277
| 0
| 0
| 0.75
| 0
| 0
| 0.430868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d83899054b2b8f50dbeca4807e4012d2ad050612
| 133
|
py
|
Python
|
Chapter4_Packages/1_Packaging_2/my_package/utils/printing/__init__.py
|
franneck94/UdemyPythonProEng
|
fd25dc2e25eaf2af15935560c9fda23470298b39
|
[
"MIT"
] | 2
|
2021-08-04T21:30:33.000Z
|
2021-11-07T19:43:30.000Z
|
Chapter4_Packages/1_Packaging_2/my_package/utils/printing/__init__.py
|
franneck94/UdemyPythonProEng
|
fd25dc2e25eaf2af15935560c9fda23470298b39
|
[
"MIT"
] | null | null | null |
Chapter4_Packages/1_Packaging_2/my_package/utils/printing/__init__.py
|
franneck94/UdemyPythonProEng
|
fd25dc2e25eaf2af15935560c9fda23470298b39
|
[
"MIT"
] | 2
|
2021-01-15T06:06:11.000Z
|
2022-02-25T02:56:02.000Z
|
"""Public re-exports for the printing utilities subpackage."""
from ._printing import print_hello_world, print_name

# Names exported via `from <package> import *`.
__all__ = [
    "print_hello_world",
    "print_name",
]
| 14.777778
| 40
| 0.744361
| 17
| 133
| 5.117647
| 0.470588
| 0.275862
| 0.413793
| 0.528736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180451
| 133
| 8
| 41
| 16.625
| 0.798165
| 0
| 0
| 0
| 0
| 0
| 0.203008
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
dc1b26f1d2176175ea6f3a477808865cb7741505
| 1,945
|
py
|
Python
|
tests/pipe_proc_tests/mir.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 150
|
2015-01-16T12:24:13.000Z
|
2022-03-03T18:01:18.000Z
|
tests/pipe_proc_tests/mir.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 129
|
2015-01-13T04:58:56.000Z
|
2022-03-02T13:39:16.000Z
|
tests/pipe_proc_tests/mir.py
|
genematx/nmrglue
|
8a24cf6cbd18451e552fc0673b84c42d1dcb69a2
|
[
"BSD-3-Clause"
] | 88
|
2015-02-16T20:04:12.000Z
|
2022-03-10T06:50:30.000Z
|
#! /usr/bin/env python
""" tests for MIR function """
import nmrglue.fileio.pipe as pipe
import nmrglue.process.pipe_proc as p
# Each (mode, invr) pair reproduces one of the original 14 unrolled
# p.mir() invocations, in the original order, so the output files
# mir1.glue ... mir14.glue are identical to what the unrolled script wrote.
_CASES = [
    ("left", False),
    ("right", False),
    ("right", True),
    ("left", True),
    ("center", False),
    ("ps90-180", False),
    ("ps0-0", False),
]

num = 0
for fname in ("time_complex.fid", "1D_freq_real.dat"):
    for mode, invr in _CASES:
        num += 1
        d, a = pipe.read(fname)
        kwargs = {"mode": mode, "sw": True}
        if invr:
            # Only pass invr when the original call did, in case its
            # default differs from an explicit False.
            kwargs["invr"] = True
        d, a = p.mir(d, a, **kwargs)
        pipe.write("mir%d.glue" % num, d, a, overwrite=True)
| 31.370968
| 52
| 0.643702
| 390
| 1,945
| 3.153846
| 0.138462
| 0.091057
| 0.068293
| 0.113821
| 0.873984
| 0.858537
| 0.858537
| 0.858537
| 0.858537
| 0.858537
| 0
| 0.023543
| 0.126478
| 1,945
| 61
| 53
| 31.885246
| 0.700412
| 0.023136
| 0
| 0.636364
| 0
| 0
| 0.226624
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dc42027924f2e7761f462790b1777952766fdbf2
| 134
|
py
|
Python
|
tests/test_cli.py
|
simonbowly/git-commit-untodo
|
16918e3001ee269473ecae8af218249e0cf8005d
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
simonbowly/git-commit-untodo
|
16918e3001ee269473ecae8af218249e0cf8005d
|
[
"MIT"
] | 9
|
2020-04-14T23:19:23.000Z
|
2020-04-15T00:20:54.000Z
|
tests/test_cli.py
|
simonbowly/git-commit-untodo
|
16918e3001ee269473ecae8af218249e0cf8005d
|
[
"MIT"
] | null | null | null |
""" Just an import test for checking installation requirements. """
from git_commit_untodo.cli import cli
def test_cli():
    """Smoke test: the module-level import of the CLI is the whole check."""
| 16.75
| 67
| 0.738806
| 19
| 134
| 5.052632
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 134
| 7
| 68
| 19.142857
| 0.872727
| 0.440299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
dc7ec567182b03b50033c79f1a0573b07f39fbe4
| 184
|
py
|
Python
|
code/app/main/views.py
|
sophilabs/dev-on-production
|
09dcf07944aa38002544022cb320995f368b680f
|
[
"BSD-3-Clause"
] | null | null | null |
code/app/main/views.py
|
sophilabs/dev-on-production
|
09dcf07944aa38002544022cb320995f368b680f
|
[
"BSD-3-Clause"
] | null | null | null |
code/app/main/views.py
|
sophilabs/dev-on-production
|
09dcf07944aa38002544022cb320995f368b680f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.template import RequestContext
from django.shortcuts import render_to_response
def index(request):
    """Render the landing page template with an empty context."""
    context = RequestContext(request)
    return render_to_response('index.html', {}, context)
| 36.8
| 72
| 0.815217
| 23
| 184
| 6.347826
| 0.608696
| 0.136986
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097826
| 184
| 5
| 72
| 36.8
| 0.879518
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
f4e7216a7dd035283e2c891b24391016e6141d6f
| 155
|
py
|
Python
|
Thomas_Test.py
|
rwawraf/handpose-control
|
e17c58864e12e928151516c111fd2cfe69f3cd88
|
[
"MIT"
] | null | null | null |
Thomas_Test.py
|
rwawraf/handpose-control
|
e17c58864e12e928151516c111fd2cfe69f3cd88
|
[
"MIT"
] | null | null | null |
Thomas_Test.py
|
rwawraf/handpose-control
|
e17c58864e12e928151516c111fd2cfe69f3cd88
|
[
"MIT"
] | 1
|
2021-06-25T07:23:41.000Z
|
2021-06-25T07:23:41.000Z
|
from classifier.centroid.train_centroid_classifier import train_centroid_classifier
import example_camera
# Train the centroid-based classifier immediately when this script runs.
train_centroid_classifier()
# Disabled camera demo. NOTE(review): `example_camera` is imported above as
# a module, so calling it like a function would raise TypeError — confirm
# the intended call (e.g. example_camera.main()) before re-enabling.
#example_camera()
| 22.142857
| 84
| 0.883871
| 19
| 155
| 6.842105
| 0.421053
| 0.3
| 0.530769
| 0.446154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 155
| 6
| 85
| 25.833333
| 0.889655
| 0.103226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.666667
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
f4fa6b9c26fc56ca4da9e1b38145ce2274740b1b
| 9,922
|
py
|
Python
|
tests/test_all_tables_e2e/test_single_table.py
|
mgedmin/terminaltables
|
ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc
|
[
"MIT"
] | 742
|
2015-01-03T21:46:14.000Z
|
2022-03-27T05:49:32.000Z
|
tests/test_all_tables_e2e/test_single_table.py
|
mgedmin/terminaltables
|
ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc
|
[
"MIT"
] | 64
|
2015-01-06T01:34:12.000Z
|
2020-05-07T21:52:11.000Z
|
tests/test_all_tables_e2e/test_single_table.py
|
mgedmin/terminaltables
|
ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc
|
[
"MIT"
] | 96
|
2015-02-26T16:42:42.000Z
|
2022-02-06T14:00:24.000Z
|
"""SingleTable end to end testing on Linux/OSX."""
import pytest
from terminaltables import SingleTable
from terminaltables.terminal_io import IS_WINDOWS
pytestmark = pytest.mark.skipif(str(IS_WINDOWS))
def test_single_line():
    """Test single-lined cells.

    Renders a SingleTable with mixed column justification, a footing-row
    border, a short row and an empty row, and compares the full rendered
    string against a literal expectation.
    """
    table_data = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
        ['Watermelon', 'green'],
        [],
    ]
    table = SingleTable(table_data, 'Example')
    table.inner_footing_row_border = True
    table.justify_columns[0] = 'left'
    table.justify_columns[1] = 'center'
    table.justify_columns[2] = 'right'
    actual = table.table
    # The escapes '\033(0' ... '\033(B' presumably switch the terminal into
    # and out of the DEC special-graphics charset, where bytes like \x71
    # render as box-drawing characters — confirm against the VT100 spec.
    # NOTE(review): runs of padding spaces inside these literals may have
    # been collapsed by whitespace mangling upstream — verify against the
    # upstream repository before relying on exact byte content.
    expected = (
        '\033(0\x6c\033(BExample\033(0\x71\x71\x71\x71\x71\x77\x71\x71\x71\x71\x71\x71\x71\x77\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x6b\033(B\n'
        '\033(0\x78\033(B Name \033(0\x78\033(B Color \033(0\x78\033(B Type \033(0\x78\033(B\n'
        '\033(0\x74\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x75\033(B\n'
        '\033(0\x78\033(B Avocado \033(0\x78\033(B green \033(0\x78\033(B nut \033(0\x78\033(B\n'
        '\033(0\x78\033(B Tomato \033(0\x78\033(B red \033(0\x78\033(B fruit \033(0\x78\033(B\n'
        '\033(0\x78\033(B Lettuce \033(0\x78\033(B green \033(0\x78\033(B vegetable \033(0\x78\033(B\n'
        '\033(0\x78\033(B Watermelon \033(0\x78\033(B green \033(0\x78\033(B \033(0\x78\033(B\n'
        '\033(0\x74\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x75\033(B\n'
        '\033(0\x78\033(B \033(0\x78\033(B \033(0\x78\033(B \033(0\x78\033(B\n'
        '\033(0\x6d\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x76\x71\x71\x71\x71\x71\x71\x71\x76\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x6a\033(B'
    )
    assert actual == expected
def test_multi_line():
    """Test multi-lined cells.

    Renders the same two-column table three times — with defaults, with
    inner row borders, and with the second column right-justified — and
    compares each rendering against a literal expectation.
    """
    table_data = [
        ['Show', 'Characters'],
        ['Rugrats', 'Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille, Angelica Pickles,\nDil Pickles'],
        ['South Park', 'Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick']
    ]
    table = SingleTable(table_data)
    # Test defaults.
    # NOTE(review): runs of padding spaces inside these expected literals
    # may have been collapsed by whitespace mangling upstream — verify
    # against the upstream repository before relying on exact byte content.
    actual = table.table
    expected = (
        '\033(0\x6c\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x77\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6b\033(B\n'
        '\033(0\x78\033(B Show \033(0\x78\033(B Characters '
        ' \033(0\x78\033(B\n'
        '\033(0\x74\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x75\033(B\n'
        '\033(0\x78\033(B Rugrats \033(0\x78\033(B Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille,'
        ' Angelica Pickles, \033(0\x78\033(B\n'
        '\033(0\x78\033(B \033(0\x78\033(B Dil Pickles '
        ' \033(0\x78\033(B\n'
        '\033(0\x78\033(B South Park \033(0\x78\033(B Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick '
        ' \033(0\x78\033(B\n'
        '\033(0\x6d\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x76\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6a\033(B'
    )
    assert actual == expected
    # Test inner row border.
    table.inner_row_border = True
    actual = table.table
    expected = (
        '\033(0\x6c\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x77\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6b\033(B\n'
        '\033(0\x78\033(B Show \033(0\x78\033(B Characters '
        ' \033(0\x78\033(B\n'
        '\033(0\x74\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x75\033(B\n'
        '\033(0\x78\033(B Rugrats \033(0\x78\033(B Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille,'
        ' Angelica Pickles, \033(0\x78\033(B\n'
        '\033(0\x78\033(B \033(0\x78\033(B Dil Pickles '
        ' \033(0\x78\033(B\n'
        '\033(0\x74\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x75\033(B\n'
        '\033(0\x78\033(B South Park \033(0\x78\033(B Stan Marsh, Kyle Broflovski, Eric Cartman, Kenny McCormick '
        ' \033(0\x78\033(B\n'
        '\033(0\x6d\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x76\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6a\033(B'
    )
    assert actual == expected
    # Justify right.
    table.justify_columns = {1: 'right'}
    actual = table.table
    expected = (
        '\033(0\x6c\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x77\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6b\033(B\n'
        '\033(0\x78\033(B Show \033(0\x78\033(B '
        ' Characters \033(0\x78\033(B\n'
        '\033(0\x74\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x75\033(B\n'
        '\033(0\x78\033(B Rugrats \033(0\x78\033(B Tommy Pickles, Chuckie Finster, Phillip DeVille, Lillian DeVille,'
        ' Angelica Pickles, \033(0\x78\033(B\n'
        '\033(0\x78\033(B \033(0\x78\033(B '
        ' Dil Pickles \033(0\x78\033(B\n'
        '\033(0\x74\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6e\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x75\033(B\n'
        '\033(0\x78\033(B South Park \033(0\x78\033(B Stan Marsh, Kyle Broflovski, '
        'Eric Cartman, Kenny McCormick \033(0\x78\033(B\n'
        '\033(0\x6d\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x76\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71'
        '\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x71\x6a\033(B'
    )
    assert actual == expected
| 57.686047
| 120
| 0.604515
| 1,874
| 9,922
| 3.189968
| 0.051227
| 1.150217
| 1.674139
| 2.163934
| 0.884075
| 0.876715
| 0.876715
| 0.876715
| 0.861827
| 0.861827
| 0
| 0.385595
| 0.192602
| 9,922
| 171
| 121
| 58.023392
| 0.360629
| 0.014816
| 0
| 0.64
| 0
| 0.536
| 0.796044
| 0.526494
| 0
| 0
| 0
| 0
| 0.032
| 1
| 0.016
| false
| 0
| 0.024
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
523472d74fb2057b3820bcced906aacdd3f5f47a
| 17,904
|
py
|
Python
|
_unittests/ut_asv_benchmark/test_template_asv_benchmark.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 32
|
2018-03-04T23:33:30.000Z
|
2022-03-10T19:15:06.000Z
|
_unittests/ut_asv_benchmark/test_template_asv_benchmark.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 184
|
2017-11-30T14:10:35.000Z
|
2022-02-21T08:29:31.000Z
|
_unittests/ut_asv_benchmark/test_template_asv_benchmark.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 9
|
2019-07-24T13:18:00.000Z
|
2022-03-07T04:08:07.000Z
|
"""
@brief test log(time=2s)
"""
import os
import unittest
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx.common.exceptions import MissingShapeCalculator
from pyquickhelper.pycode import ExtTestCase
from mlprodict.tools.asv_options_helper import get_opset_number_from_onnx
from mlprodict.asv_benchmark.template.skl_model_classifier import (
TemplateBenchmarkClassifier)
from mlprodict.asv_benchmark.template.skl_model_classifier_raw_scores import (
TemplateBenchmarkClassifierRawScore)
from mlprodict.asv_benchmark.template.skl_model_clustering import (
TemplateBenchmarkClustering)
from mlprodict.asv_benchmark.template.skl_model_multi_classifier import (
TemplateBenchmarkMultiClassifier)
from mlprodict.asv_benchmark.template.skl_model_regressor import (
TemplateBenchmarkRegressor)
from mlprodict.asv_benchmark.template.skl_model_outlier import (
TemplateBenchmarkOutlier)
from mlprodict.asv_benchmark.template.skl_model_trainable_transform import (
TemplateBenchmarkTrainableTransform)
from mlprodict.asv_benchmark.template.skl_model_transform import (
TemplateBenchmarkTransform)
from mlprodict.asv_benchmark.template.skl_model_transform_positive import (
TemplateBenchmarkTransformPositive)
class TestAsvTemplateBenchmark(ExtTestCase):
    @ignore_warnings(category=(UserWarning, ))
    def test_template_benchmark_classifier(self):
        """Run every time/peakmem/track benchmark of the classifier
        template for each runtime and verify the collected result keys."""
        if not os.path.exists('_cache'):
            os.mkdir('_cache')
        cl = TemplateBenchmarkClassifier()
        res = {}
        cl.setup_cache()
        N = 60
        # Presumably the number of features, taken from the asv parameter
        # grid — confirm against the template class.
        nf = cl.params[2][1]
        opset = get_opset_number_from_onnx()
        dtype = 'float'
        optim = None
        for runtime in ['skl', 'pyrt', 'ort']:
            cl.setup(runtime, N, nf, opset, dtype, optim)
            self.assertEqual(cl.X.shape, (N, nf))
            # Discover benchmark methods by their name prefix, the same way
            # asv does, and invoke each one explicitly.
            for method in dir(cl):
                if method.split('_')[0] in ('time', 'peakmem', 'track'):
                    meth = getattr(cl.__class__, method)
                    res[method, runtime] = meth(
                        cl, runtime, N, nf, opset, dtype, optim)
                    # A score of exactly 0 or 1 suggests a degenerate setup.
                    if method == 'track_score' and res[method, runtime] in (0, 1):
                        raise AssertionError(
                            "Predictions are too perfect: {},{}: {}".format(
                                method, runtime, res[method, runtime]))
        self.assertEqual(len(res), 24)
        exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
               ('track_score', 'skl'), ('track_onnxsize', 'skl'),
               ('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
               ('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
               ('time_predict', 'ort'), ('peakmem_predict', 'ort'),
               ('track_score', 'ort'), ('track_onnxsize', 'ort'),
               ('track_nbnodes', 'skl'), ('track_nbnodes', 'ort'),
               ('track_nbnodes', 'pyrt')]
        # Compare ignoring dynamically versioned trackers (track_v*).
        self.assertEqual(
            set(_ for _ in exp if not _[0].startswith('track_v')),
            set(_ for _ in res if not _[0].startswith('track_v')))
    @ignore_warnings(category=(UserWarning, ))
    def test_template_benchmark_classifier_raw_scores(self):
        """Same checks as the classifier test, for the raw-score
        classifier template."""
        if not os.path.exists('_cache'):
            os.mkdir('_cache')
        cl = TemplateBenchmarkClassifierRawScore()
        res = {}
        cl.setup_cache()
        N = 60
        nf = cl.params[2][1]
        opset = get_opset_number_from_onnx()
        dtype = 'float'
        optim = None
        for runtime in ['skl', 'pyrt', 'ort']:
            cl.setup(runtime, N, nf, opset, dtype, optim)
            self.assertEqual(cl.X.shape, (N, nf))
            # Invoke every time/peakmem/track benchmark method by name.
            for method in dir(cl):
                if method.split('_')[0] in ('time', 'peakmem', 'track'):
                    meth = getattr(cl.__class__, method)
                    res[method, runtime] = meth(
                        cl, runtime, N, nf, opset, dtype, optim)
                    if method == 'track_score' and res[method, runtime] in (0, 1):
                        raise AssertionError(
                            "Predictions are too perfect: {},{}: {}".format(
                                method, runtime, res[method, runtime]))
        self.assertEqual(len(res), 24)
        exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
               ('track_score', 'skl'), ('track_onnxsize', 'skl'),
               ('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
               ('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
               ('time_predict', 'ort'), ('peakmem_predict', 'ort'),
               ('track_score', 'ort'), ('track_onnxsize', 'ort'),
               ('track_nbnodes', 'skl'), ('track_nbnodes', 'ort'),
               ('track_nbnodes', 'pyrt')]
        # Compare ignoring dynamically versioned trackers (track_v*).
        self.assertEqual(
            set(_ for _ in exp if not _[0].startswith('track_v')),
            set(_ for _ in res if not _[0].startswith('track_v')))
    @ignore_warnings(category=(UserWarning, ))
    def test_template_benchmark_clustering(self):
        """Same checks for the clustering template; only the 'skl' and
        'pyrt' runtimes are exercised here."""
        if not os.path.exists('_cache'):
            os.mkdir('_cache')
        cl = TemplateBenchmarkClustering()
        res = {}
        cl.setup_cache()
        N = 60
        nf = cl.params[2][1]
        opset = get_opset_number_from_onnx()
        dtype = 'float'
        optim = None
        for runtime in ['skl', 'pyrt']:
            cl.setup(runtime, N, nf, opset, dtype, optim)
            self.assertEqual(cl.X.shape, (N, nf))
            # Invoke every time/peakmem/track benchmark method by name.
            for method in dir(cl):
                if method.split('_')[0] in ('time', 'peakmem', 'track'):
                    meth = getattr(cl.__class__, method)
                    res[method, runtime] = meth(
                        cl, runtime, N, nf, opset, dtype, optim)
                    if method == 'track_score' and res[method, runtime] in (0, 1):
                        raise AssertionError(
                            "Predictions are too perfect: {},{}: {}".format(
                                method, runtime, res[method, runtime]))
        self.assertEqual(len(res), 16)
        exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
               ('track_score', 'skl'), ('track_onnxsize', 'skl'),
               ('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
               ('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
               ('track_nbnodes', 'skl'), ('track_nbnodes', 'pyrt')]
        # Compare ignoring dynamically versioned trackers (track_v*).
        self.assertEqual(
            set(_ for _ in exp if not _[0].startswith('track_v')),
            set(_ for _ in res if not _[0].startswith('track_v')))
    @ignore_warnings(category=(UserWarning, ))
    def test_template_benchmark_regressor(self):
        """Same checks as the classifier test, for the regressor
        template."""
        if not os.path.exists('_cache'):
            os.mkdir('_cache')
        cl = TemplateBenchmarkRegressor()
        res = {}
        cl.setup_cache()
        N = 60
        nf = cl.params[2][1]
        opset = get_opset_number_from_onnx()
        dtype = 'float'
        optim = None
        for runtime in ['skl', 'pyrt', 'ort']:
            cl.setup(runtime, N, nf, opset, dtype, optim)
            self.assertEqual(cl.X.shape, (N, nf))
            # Invoke every time/peakmem/track benchmark method by name.
            for method in dir(cl):
                if method.split('_')[0] in ('time', 'peakmem', 'track'):
                    meth = getattr(cl.__class__, method)
                    res[method, runtime] = meth(
                        cl, runtime, N, nf, opset, dtype, optim)
                    if method == 'track_score' and res[method, runtime] in (0, 1):
                        raise AssertionError(
                            "Predictions are too perfect: {},{}: {}".format(
                                method, runtime, res[method, runtime]))
        self.assertEqual(len(res), 24)
        exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
               ('track_score', 'skl'), ('track_onnxsize', 'skl'),
               ('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
               ('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
               ('time_predict', 'ort'), ('peakmem_predict', 'ort'),
               ('track_score', 'ort'), ('track_onnxsize', 'ort'),
               ('track_nbnodes', 'skl'), ('track_nbnodes', 'ort'),
               ('track_nbnodes', 'pyrt')]
        # Compare ignoring dynamically versioned trackers (track_v*).
        self.assertEqual(
            set(_ for _ in exp if not _[0].startswith('track_v')),
            set(_ for _ in res if not _[0].startswith('track_v')))
    @ignore_warnings(category=(UserWarning, ))
    def test_template_benchmark_multi_classifier(self):
        """Same checks for the multi-classifier template; skips entirely
        if the template's setup is not implemented for a runtime."""
        if not os.path.exists('_cache'):
            os.mkdir('_cache')
        cl = TemplateBenchmarkMultiClassifier()
        res = {}
        cl.setup_cache()
        N = 60
        nf = cl.params[2][1]
        opset = get_opset_number_from_onnx()
        dtype = 'float'
        optim = None
        for runtime in ['skl', 'pyrt']:
            try:
                cl.setup(runtime, N, nf, opset, dtype, optim)
            except NotImplementedError:
                # not implemented
                return
            self.assertEqual(cl.X.shape, (N, nf))
            # Invoke every time/peakmem/track benchmark method by name.
            for method in dir(cl):
                if method.split('_')[0] in ('time', 'peakmem', 'track'):
                    meth = getattr(cl.__class__, method)
                    res[method, runtime] = meth(
                        cl, runtime, N, nf, opset, dtype, optim)
                    if method == 'track_score' and res[method, runtime] in (0, 1):
                        raise AssertionError(
                            "Predictions are too perfect: {},{}: {}".format(
                                method, runtime, res[method, runtime]))
        self.assertEqual(len(res), 16)
        exp = [('peakmem_predict', 'skl'), ('time_predict', 'skl'),
               ('track_nbnodes', 'skl'), ('track_onnxsize', 'skl'),
               ('track_score', 'skl'), ('peakmem_predict', 'pyrt'),
               ('time_predict', 'pyrt'), ('track_nbnodes', 'pyrt'),
               ('track_onnxsize', 'pyrt'), ('track_score', 'pyrt')]
        # Compare ignoring dynamically versioned trackers (track_v*).
        self.assertEqual(
            set(_ for _ in exp if not _[0].startswith('track_v')),
            set(_ for _ in res if not _[0].startswith('track_v')))
@ignore_warnings(category=(UserWarning, ))
def test_template_benchmark_outlier(self):
if not os.path.exists('_cache'):
os.mkdir('_cache')
cl = TemplateBenchmarkOutlier()
res = {}
cl.setup_cache()
N = 60
nf = cl.params[2][1]
expect = 16
opset = get_opset_number_from_onnx()
dtype = 'float'
optim = None
for runtime in ['skl', 'pyrt']:
try:
cl.setup(runtime, N, nf, opset, dtype, optim)
except MissingShapeCalculator:
# Converter not yet implemented.
expect = 0
continue
self.assertEqual(cl.X.shape, (N, nf))
for method in dir(cl):
if method.split('_')[0] in ('time', 'peakmem', 'track'):
meth = getattr(cl.__class__, method)
res[method, runtime] = meth(
cl, runtime, N, nf, opset, dtype, optim)
if method == 'track_score' and res[method, runtime] in (0, 1):
raise AssertionError(
"Predictions are too perfect: {},{}: {}".format(
method, runtime, res[method, runtime]))
if expect == 0:
return
self.assertEqual(len(res), expect)
exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
('track_score', 'skl'), ('track_onnxsize', 'skl'),
('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
('track_nbnodes', 'skl'), ('track_nbnodes', 'pyrt')]
self.assertEqual(
set(_ for _ in exp if not _[0].startswith('track_v')),
set(_ for _ in res if not _[0].startswith('track_v')))
@ignore_warnings(category=(UserWarning, ))
def test_template_benchmark_trainable_transform(self):
if not os.path.exists('_cache'):
os.mkdir('_cache')
cl = TemplateBenchmarkTrainableTransform()
res = {}
cl.setup_cache()
N = 60
nf = cl.params[2][1]
opset = get_opset_number_from_onnx()
dtype = 'float'
expect = 12
optim = None
for runtime in ['skl', 'pyrt']:
try:
cl.setup(runtime, N, nf, opset, dtype, optim)
except MissingShapeCalculator:
# Converter not yet implemented.
expect = 0
continue
self.assertEqual(cl.X.shape, (N, nf))
for method in dir(cl):
if method.split('_')[0] in ('time', 'peakmem', 'track'):
meth = getattr(cl.__class__, method)
res[method, runtime] = meth(
cl, runtime, N, nf, opset, dtype, optim)
if method == 'track_score' and res[method, runtime] in (0, 1):
raise AssertionError(
"Predictions are too perfect: {},{}: {}".format(
method, runtime, res[method, runtime]))
if expect == 0:
return
self.assertEqual(len(res), expect)
exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
('track_score', 'skl'), ('track_onnxsize', 'skl'),
('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
('track_nbnodes', 'skl'), ('track_opset', 'skl'),
('track_opset', 'pyrt'), ('track_nbnodes', 'pyrt')]
self.assertEqual(
set(_ for _ in exp if not _[0].startswith('track_v')),
set(_ for _ in res if not _[0].startswith('track_v')))
@ignore_warnings(category=(UserWarning, ))
def test_template_benchmark_transform(self):
if not os.path.exists('_cache'):
os.mkdir('_cache')
cl = TemplateBenchmarkTransform()
res = {}
cl.setup_cache()
N = 60
nf = cl.params[2][1]
opset = get_opset_number_from_onnx()
dtype = 'float'
expect = 16
optim = None
for runtime in ['skl', 'pyrt']:
try:
cl.setup(runtime, N, nf, opset, dtype, optim)
except MissingShapeCalculator:
# Converter not yet implemented.
expect = 0
continue
self.assertEqual(cl.X.shape, (N, nf))
for method in dir(cl):
if method.split('_')[0] in ('time', 'peakmem', 'track'):
meth = getattr(cl.__class__, method)
res[method, runtime] = meth(
cl, runtime, N, nf, opset, dtype, optim)
if method == 'track_score' and res[method, runtime] in (0, 1):
raise AssertionError(
"Predictions are too perfect: {},{}: {}".format(
method, runtime, res[method, runtime]))
if expect == 0:
return
self.assertEqual(len(res), expect)
exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
('track_score', 'skl'), ('track_onnxsize', 'skl'),
('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
('track_nbnodes', 'skl'), ('track_nbnodes', 'pyrt')]
self.assertEqual(
set(_ for _ in exp if not _[0].startswith('track_v')),
set(_ for _ in res if not _[0].startswith('track_v')))
@ignore_warnings(category=(UserWarning, ))
def test_template_benchmark_transformPositive(self):
if not os.path.exists('_cache'):
os.mkdir('_cache')
cl = TemplateBenchmarkTransformPositive()
res = {}
cl.setup_cache()
N = 60
nf = cl.params[2][1]
opset = get_opset_number_from_onnx()
dtype = 'float'
expect = 12
optim = None
for runtime in ['skl', 'pyrt']:
try:
cl.setup(runtime, N, nf, opset, dtype, optim)
except MissingShapeCalculator:
# Converter not yet implemented.
expect = 0
continue
self.assertEqual(cl.X.shape, (N, nf))
for method in dir(cl):
if method.split('_')[0] in ('time', 'peakmem', 'track'):
meth = getattr(cl.__class__, method)
res[method, runtime] = meth(
cl, runtime, N, nf, opset, dtype, optim)
if method == 'track_score' and res[method, runtime] in (0, 1):
raise AssertionError(
"Predictions are too perfect: {},{}: {}".format(
method, runtime, res[method, runtime]))
if expect == 0:
return
self.assertEqual(len(res), expect)
exp = [('time_predict', 'skl'), ('peakmem_predict', 'skl'),
('track_score', 'skl'), ('track_onnxsize', 'skl'),
('time_predict', 'pyrt'), ('peakmem_predict', 'pyrt'),
('track_score', 'pyrt'), ('track_onnxsize', 'pyrt'),
('track_nbnodes', 'skl'), ('track_opset', 'skl'),
('track_opset', 'pyrt'), ('track_nbnodes', 'pyrt')]
self.assertEqual(
set(_ for _ in exp if not _[0].startswith('track_v')),
set(_ for _ in res if not _[0].startswith('track_v')))
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()
| 45.790281
| 82
| 0.526028
| 1,834
| 17,904
| 4.924209
| 0.069248
| 0.051822
| 0.047835
| 0.029897
| 0.871554
| 0.866239
| 0.866239
| 0.834016
| 0.811649
| 0.80423
| 0
| 0.009125
| 0.332831
| 17,904
| 390
| 83
| 45.907692
| 0.746923
| 0.009495
| 0
| 0.856369
| 0
| 0
| 0.154384
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 1
| 0.02439
| false
| 0
| 0.04607
| 0
| 0.086721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
525d52ece48b573503ca731f2429904b1e396480
| 607
|
py
|
Python
|
SViTE/backup/test_mask.py
|
VITA-Group/SViTE
|
b0c62fd153c8b0b99917ab935ee76925c9de1149
|
[
"MIT"
] | 50
|
2021-05-29T00:52:45.000Z
|
2022-03-17T11:39:47.000Z
|
SViTE/backup/test_mask.py
|
VITA-Group/SViTE
|
b0c62fd153c8b0b99917ab935ee76925c9de1149
|
[
"MIT"
] | 2
|
2022-01-16T07:24:52.000Z
|
2022-03-29T01:56:24.000Z
|
SViTE/backup/test_mask.py
|
VITA-Group/SViTE
|
b0c62fd153c8b0b99917ab935ee76925c9de1149
|
[
"MIT"
] | 6
|
2021-06-27T22:24:16.000Z
|
2022-01-17T02:45:32.000Z
|
import torch


def _print_mask_agreement(pattern, num_ranks=8, reference=1):
    """Load one mask checkpoint per rank and print per-key agreement.

    ``pattern`` is formatted with each rank id to build the file name
    (e.g. ``'{}-init_mask.pt'``). For every key of the rank-0 mask dict,
    prints the list of fractions of entries equal to the reference rank's
    tensor, one fraction per rank.

    NOTE(review): the reference rank is 1, exactly as in the original
    duplicated code, so the entry for rank 1 is always 1.0 — confirm that
    rank 0 was not the intended reference.
    """
    masks = {
        rank: torch.load(pattern.format(rank), map_location='cpu')
        for rank in range(num_ranks)
    }
    for key in masks[0].keys():
        result = [
            (masks[rank][key] == masks[reference][key]).float().mean().item()
            for rank in range(num_ranks)
        ]
        print(key, result)


# Compare the per-rank initial masks, then the synchronized initial masks.
# (The original script repeated this block verbatim for each file pattern.)
_print_mask_agreement('{}-init_mask.pt')
_print_mask_agreement('{}-init_mask_syn.pt')
| 24.28
| 83
| 0.611203
| 101
| 607
| 3.524752
| 0.277228
| 0.224719
| 0.067416
| 0.123596
| 0.960674
| 0.960674
| 0.960674
| 0.960674
| 0.960674
| 0.960674
| 0
| 0.016032
| 0.177924
| 607
| 25
| 84
| 24.28
| 0.697395
| 0
| 0
| 0.823529
| 0
| 0
| 0.065789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.117647
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
52666a722fa0b78e438d5ad285ca47ebc22e5016
| 314,543
|
py
|
Python
|
gestionale/controllers/default.py
|
ValentinoUberti/mcimporter
|
b9d777ab02eb51d193a86af096b7976e6d5a4dc9
|
[
"Apache-2.0"
] | null | null | null |
gestionale/controllers/default.py
|
ValentinoUberti/mcimporter
|
b9d777ab02eb51d193a86af096b7976e6d5a4dc9
|
[
"Apache-2.0"
] | null | null | null |
gestionale/controllers/default.py
|
ValentinoUberti/mcimporter
|
b9d777ab02eb51d193a86af096b7976e6d5a4dc9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
# -------------------------------------------------------------------------
# This is a sample controller
# - index is the default action of any application
# - user is required for authentication and authorization
# - download is for downloading files uploaded in the db (does streaming)
# -------------------------------------------------------------------------
from __future__ import division
import json
import csv
import datetime
from calendar import monthrange
import gluon
from datetime import timedelta
import wrapper
import subprocess
import os
import socket
import sys
from imports.writeToXlsx import WriteToXlsx
from imports.yamlImporter import YamlImporter
from imports.attendanceImporter import AttendanceImporter
import calendar
import locale
from datetime import datetime
from calendar import monthrange
def ore_dipendenti():
    """web2py action for the employee-hours page.

    Returns the (empty) local namespace so the matching view renders with
    no extra variables.
    """
    return locals()
def upload_csv():
    """web2py action: import an uploaded attendance workbook and fill the
    monthly time-sheet template (pamaster.xlsm).

    Reads the uploaded file from ``request.vars['csvfile[]']``, saves it,
    parses it with AttendanceImporter, writes per-worker daily hours into
    the template via WriteToXlsx and returns (as JSON) the URL of the
    generated workbook.
    """
    def fixHours(h):
        # Round the fractional part of worked hours: [0.41, 0.88) -> .50,
        # > 0.88 -> round up to the next hour, otherwise truncate.
        # NOTE(review): a fraction of exactly 0.88 falls through to the
        # else branch and is truncated -- confirm the boundary is intended.
        decimal=h % 1
        number=int(h)
        print("Decimal = ",number,decimal)
        if decimal >= 0.41 and decimal <0.88:
            decimal=0.50
            number=float(number) + decimal
        elif decimal > 0.88:
            decimal=0.00
            number=float(number+1)
        else:
            number=float(number)
        #if number==decimal==0:
        #    number=0
        return number
    def fixHoursAndRest(h):
        # Same rounding as fixHours but applied to the complement of a
        # standard 8-hour day (8 - worked hours).
        decimal=h % 1
        number=8 - int(h)
        print("Decimal = ",number,decimal)
        if decimal >= 0.41 and decimal <0.88:
            decimal=0.50
            number=float(number) + decimal
        elif decimal > 0.88:
            decimal=0.00
            number=float(number+1)
        else:
            number=float(number)
        #if number==decimal==0:
        #    number=0
        return number
    def fixHoursAndRestFriday(h):
        # Same rounding, relative to the shorter 7-hour Friday schedule.
        decimal=h % 1
        number=int(h) -7
        print("Decimal Friday = ",number,decimal)
        if decimal >= 0.41 and decimal <0.88:
            decimal=0.50
            number=float(number) + decimal
        elif decimal > 0.88:
            decimal=0.00
            number=float(number+1)
        else:
            number=float(number)
        #if number==decimal==0:
        #    number=0
        return number
    all=[]
    # Save the uploaded file
    xlsx=request.vars['csvfile[]'].value
    outFileName=filepath = os.path.join(request.folder, 'uploads', "hours_uploaded.xlsx")
    outFile=open(outFileName,"w")
    outFile.write(xlsx)
    outFile.close()
    pamaster_path=os.path.join(request.folder, 'static/timbratore', "pamaster.xlsm")
    workers_path=os.path.join(request.folder, 'static/timbratore', "workers.yaml")
    # Italian locale so strftime("%A") below yields Italian day names.
    locale.setlocale(locale.LC_ALL, 'it_IT.UTF-8') # Italian on windows
    yamlData = YamlImporter(workers_path)
    data = yamlData.importYaml()
    attendance=AttendanceImporter(outFileName)
    attendance.loadData()
    #attendance.orderData()
    # Slot 32 of the parsed days holds the month number -- presumably a
    # sentinel entry written by AttendanceImporter; verify against importer.
    monthNumber=attendance.finalOrderedActions.days[32]
    monthName=calendar.month_name[monthNumber].title()
    year=datetime.now().year
    downloadFileName="timbrature-"+monthName+"-"+str(year)+".xlsx"
    saved_path=os.path.join(request.folder, 'static/timbratore', downloadFileName)
    XLSM = WriteToXlsx(pamaster_path, saved_path)
    # Template rows where the month name must be stamped (one per sheet block).
    rowsMonth=[1,38,75,112,149]
    for i in rowsMonth:
        XLSM.write(i,45,monthName)
    #Fix day name
    # Write the abbreviated (3-letter) day name over each day column header.
    for row in data:
        startingRow=int(row.startingRow) -2
        num_days = monthrange(year, monthNumber)[1]
        for day in range(1,num_days+1):
            currentDate=datetime.strptime("{0}/{1}/{2}".format(day,monthNumber,year),"%d/%m/%Y")
            dayName=currentDate.strftime("%A")[:3]
            XLSM.write(int(startingRow),(day*2)-1+4,dayName.upper())
    # Fill per-worker hours; day index 32 is the month sentinel, skip it.
    for day in attendance.finalOrderedActions.days:
        if day < 32:
            for worker in attendance.finalOrderedActions.days[day]:
                hours=attendance.finalOrderedActions.days[day][worker]
                print("Day {}, Worker {}, Hour {}".format(day,worker,hours))
                startingRow=yamlData.returnStartingRow(worker)
                currentDate=datetime.strptime("{0}/{1}/{2}".format(day,monthNumber,year),"%d/%m/%Y")
                dayOfTheWeek=currentDate.weekday()
                if dayOfTheWeek ==4: #Friday
                    # Friday: 7 regular hours, the excess goes to the S1
                    # (overtime) row plus 1 hour flagged FR.
                    if hours > 6.9:
                        XLSM.write(int(startingRow),(day*2)-1+5,7)
                        XLSM.write(int(startingRow)+1,(day*2)-1+5,fixHoursAndRestFriday(hours))
                        XLSM.write(int(startingRow)+1,(day*2)-2+5,"S1")
                        XLSM.write(int(startingRow)+2,(day*2)-1+5,1)
                        XLSM.write(int(startingRow)+2,(day*2)-2+5,"FR")
                    else:
                        if hours>0:
                            XLSM.write(int(startingRow),(day*2)-1+5,fixHours(hours))
                            XLSM.write(int(startingRow)+2,(day*2)-1+5,fixHoursAndRest(hours)-1)
                            XLSM.write(int(startingRow)+2,(day*2)-2+5,"FR")
                else:
                    # Regular day: 8 hours cap, overtime written on the S1 row.
                    if hours > 7.9:
                        XLSM.write(int(startingRow),(day*2)-1+5,8)
                        if fixHours(hours) - 8 >0:
                            XLSM.write(int(startingRow)+1,(day*2)-2+5,"S1")
                            XLSM.write(int(startingRow)+1,(day*2)-1+5,fixHours(hours) -8 )
                    else:
                        if hours>0:
                            XLSM.write(int(startingRow),(day*2)-1+5,fixHours(hours))
    XLSM.save()
    all.append(URL('static/timbratore',downloadFileName))
    return response.json(all)
@service.jsonrpc
@service.jsonrpc2
def stampa_rcp(args):
    """JSON-RPC action: print the "Registro dei Controlli in Produzione"
    (production-control sheet) PDF for one articoli_in_produzione record.

    ``args['0']`` is the id of the articoli_in_produzione row; order and
    article details are gathered from the DAL and rendered through
    CONTROLLO_PRODUZIONE.
    """
    id_riga_in_produzione=args['0']
    row = db(db.articoli_in_produzione.id == id_riga_in_produzione).select().first()
    # Reformat the DB timestamp to dd/mm/yyyy for the printed sheet.
    # NOTE(review): the module header also does `from datetime import
    # datetime`, which rebinds the name `datetime` to the class, making
    # `datetime.datetime` an AttributeError -- confirm which import is in
    # effect when this action runs.
    scadenza=datetime.datetime.strptime(str(row.data_consegna),"%Y-%m-%d %H:%M:%S").strftime("%d/%m/%Y")
    cliente=row.cliente
    riferimento_ordine=row.riferimento_ordine
    codice_ordine=row.codice_ordine
    codice_articolo=row.codice_articolo
    descrizione=row.descrizione
    saldo=row.qta_saldo
    id_riga=row.id_riga
    dettaglio_ordine = db(db.ordine_cliente.ultimo_codice_ordine==codice_ordine).select().first()
    # print dettaglio_ordine
    try:
        ente=dettaglio_ordine.ente
        if ente is None:
            ente="Nessuno"
    except:
        # No matching order row (dettaglio_ordine is None): default label.
        ente="Nessuno"
    # print "Ente : ",ente
    try:
        revisione = str(db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().revisione)
        # print "revisione = "+ revisione
    except Exception,e:
        # print e.message
        # NOTE(review): if this lookup fails, `revisione` remains unbound
        # and p.intestazione(...) below raises NameError -- confirm.
        pass
    dettagli=db(db.anagrafica_articoli.codice_articolo==codice_articolo).select().first()
    giacenza=dettagli.giacenza
    ubicazione=dettagli.ubicazione
    cartella=dettagli.cartella_disegno
    peso=dettagli.peso
    if peso is None:
        peso=""
    p = CONTROLLO_PRODUZIONE("Microcarp S.r.l.","Registro dei Controlli in Produzione")
    p.intestazione(cliente,riferimento_ordine, codice_articolo,scadenza,revisione, saldo,giacenza,ubicazione,cartella,peso)
    p.footer(str(id_riga),ente)
    # One sheet row per configured machining/inspection step.
    lavorazioni=db(db.lavorazioni).select()
    for lavorazione in lavorazioni:
        p.add_row(lavorazione.nome,lavorazione.controllo)
    p.insert_rows()
    p.create_pdf()
@service.jsonrpc
@service.jsonrpc2
def crea_fattura(args):
    """JSON-RPC action: build and persist a deferred invoice ("FATTURA
    DIFFERITA") for one customer from the DDTs queued in ddt_da_fatturare.

    ``args['0']`` is the customer id. The action computes the next invoice
    number, renders the invoice PDF via FATTURA, marks the involved DDTs as
    invoiced, inserts the saved invoice row(s) (two rows when the payment
    is split, i.e. contains "/"), and advances the invoice counter.
    On any payment-configuration problem it sets response.flash and returns
    locals() so the page re-renders with the message.
    """
    id_cliente=args['0']
    # print "ID CLIENTE : ",id_cliente
    # Next invoice number: "<n>/<year>" incremented from the stored counter.
    numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
    numero = int(numero_corrente_fattura.split("/")[0])
    anno = int(numero_corrente_fattura.split("/")[1])
    numero +=1
    numero_fattura_da_salvare = str(numero)+"/"+str(anno)
    """
    Dati cliente
    """
    dati_cliente = db(db.clienti.id == id_cliente).select().first()
    nome_cliente=dati_cliente.nome
    citta_cliente = dati_cliente.citta
    indirizzo_cliente = dati_cliente.indirizzo
    cap_cliente = dati_cliente.cap
    provincia_cliente = dati_cliente.provincia
    cf_cliente = dati_cliente.codice_fiscale
    pi_cliente = dati_cliente.partita_iva
    nazione_cliente = dati_cliente.nazione
    codice_banca = dati_cliente.codice_banca
    iban_cliente = dati_cliente.codice_iban
    dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
    scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
    annotazioni=dati_cliente.annotazioni
    ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
    # Invoice date = last day of the month of the (last) queued DDT.
    for r in ddts_id:
        data_scelta = r.data_emissione
        m = datetime.datetime.strptime(data_scelta,"%d/%m/%Y").date()
        # print "MESE : "+str(m.month)
        day_start,day_end = monthrange(m.year, m.month)
        d = str(day_end)+"/"+str(m.month)+"/"+str(m.year)
        start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
    fattura = FATTURA("FATTURA DIFFERITA",start_date.strftime("%d/%m/%Y"),numero_fattura_da_salvare)
    fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
    try:
        fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
    except Exception,e:
        # print e
        response.flash="Controllare il tipo di pagamento in anagrafica"
        return locals()
    ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
    fattura.rows=[]
    # Accumulators: net amount per VAT code, totals, DDT ids on this invoice.
    lista_codici_iva = {}
    importo_totale = 0
    imposta_totale = 0
    imposta_iva = 0
    lista_ddt = []
    scritta_esenzione = False
    for ddt_id in ddts_id:
        lista_ddt.append(ddt_id.ddt_id)
        riferimento_ddt = "Rif. DDT : " + ddt_id.numero_ddt + " del " + ddt_id.data_emissione
        fattura.add_row("",riferimento_ddt,"","","","","","","")
        rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id.ddt_id).select()
        # print "DDT ID : ",ddt_id.ddt_id
        for row in rows:
            """
            <Row {'n_riga': '3', 'prezzo': '8.9919', 'saved_ddt_id': '21', 'quantita': '11', 'evasione': datetime.datetime(2017, 1, 31, 8, 56), 'id': 10L, 'codice_articolo': '892069925', 'codice_iva': 'Iva 22%', 'descrizione': 'FLANGIA', 'sconti': None, 'u_m': 'Nr', 'user_id': '1', 'codice_ordine': '1/17', 'id_ordine': '26', 'riferimento_ordine': 'fdsfsdf'}>
            """
            """
            La riga del ddt contiene i dati relativi all'ordine (id_ordine)
            siccome il pagamento può essere modificato bisogna risalire all'ordine
            poi al tipo di pagamento, poi ai giorni e calcolare la data
            """
            if not "commento" in row.codice_articolo:
                id_ordine = row.id_ordine
                try:
                    try:
                        pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
                        # print "pagamento = ",pagamento
                    except:
                        pagamento = None
                    # Fall back to the customer's default payment terms.
                    if pagamento is None:
                        pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
                    if "F.M." in pagamento:
                        fine_mese = True
                    else:
                        fine_mese = False
                    if not fine_mese:
                        try:
                            giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                            # Dec/Jan/Feb: shorten the standard 60/90/120-day
                            # terms by 4 days.
                            if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
                                if int(giorni_da_aggiungere)==60:
                                    giorni_da_aggiungere="56"
                                if int(giorni_da_aggiungere)==90:
                                    giorni_da_aggiungere="86"
                                if int(giorni_da_aggiungere)==120:
                                    giorni_da_aggiungere="116"
                            scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                            scadenza_salvata = scadenza
                            scadenza = scadenza.strftime("%d/%m/%Y")
                        except:
                            response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
                            return locals()
                    else:
                        # NOTE(review): `("M.S." or "ms") in pagamento`
                        # evaluates as `"M.S." in pagamento` -- the "ms"
                        # alternative is never tested; confirm intent.
                        if ("M.S." or "ms") in pagamento:
                            giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                            giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
                            if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
                                if int(giorni_da_aggiungere)==60:
                                    giorni_da_aggiungere="56"
                                if int(giorni_da_aggiungere)==90:
                                    giorni_da_aggiungere="86"
                                if int(giorni_da_aggiungere)==120:
                                    giorni_da_aggiungere="116"
                            # End-of-month due date, then shifted into the
                            # following month by giorni_mese_successivo.
                            scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                            day_start,day_end = monthrange(scadenza.year, scadenza.month)
                            scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                            scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
                            scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
                            scadenza = scadenza.strftime("%d/%m/%Y")
                        else:
                            # Fine mese senza M.S.
                            giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                            if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
                                if int(giorni_da_aggiungere)==60:
                                    giorni_da_aggiungere="56"
                                if int(giorni_da_aggiungere)==90:
                                    giorni_da_aggiungere="86"
                                if int(giorni_da_aggiungere)==120:
                                    giorni_da_aggiungere="116"
                            scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                            day_start,day_end = monthrange(scadenza.year, scadenza.month)
                            scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                            pass
                    fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(iban_cliente),pagamento,str(scadenza))
                except Exception,e:
                    # print e
                    response.flash="Controllare il tipo di pagamento in anagrafica"
                    return locals()
                # print "Aggiunta rig"
                sconti = row.sconti
                if row.sconti is None:
                    sconti=""
                try:
                    # A literal "0" price is treated as missing: float("")
                    # raises and the user is asked to fix the DDT line.
                    if row.prezzo == "0":
                        row.prezzo = ""
                    f = float(row.prezzo)
                    # print "SONO QUI : PREZZO = ".format(f)
                except:
                    msg = "Prezzo non presente " + riferimento_ddt + " Cod.Art : " + row.codice_articolo
                    response.flash=msg
                    return locals()
                try:
                    f=float(row.quantita)
                except:
                    msg = "Quantità non valida Cod.Art : " + row.codice_articolo + " Qta : " +row.qta
                    response.flash=msg
                    return locals()
                    pass
                importo = saved_importo = float(row.quantita) * float(row.prezzo)
                # Italian-formatted amount for the PDF; raw value kept for totals.
                importo = Money(str(importo),"EUR")
                importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
                prezzo = str(row.prezzo).replace(".",",")
                codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
                # print "VALLLLE " + row.codice_iva
                # The VAT code is re-read from the originating order line.
                descrizione_codice_iva = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine, db.righe_in_ordine_cliente.n_riga==row.n_riga).select().first()["codice_iva"]
                codice_iva=db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["codice_iva"]
                row.codice_iva=codice_iva
                if "Esenzione" in descrizione_codice_iva:
                    scritta_esenzione = True
                percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["percentuale_iva"]
                importo_totale +=saved_importo
                imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
                if not codice_iva in lista_codici_iva:
                    lista_codici_iva[codice_iva] = saved_importo
                else:
                    lista_codici_iva[codice_iva] += saved_importo
            else:
                """
                Passo il commento ma resetto tutti i campi
                """
                row.riferimento_ordine=""
                row.u_m=""
                row.quantita=""
                prezzo=""
                sconti=""
                importo=""
                codice_iva=""
                row.codice_articolo=""
                # row.descrizione=row.commento
            fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.quantita,prezzo,sconti,importo,codice_iva)
        # Mark the DDT as invoiced.
        r = db(db.ddt_cliente.id == ddt_id.ddt_id).select().first()
        r.update_record(fattura_emessa = "T")
    # print lista_codici_iva
    # Optional fixed 2-euro stamp duty line for customers flagged "bollo".
    bollo= dati_cliente.bollo
    if bollo:
        print "SONO NEL BOLLO"
        codice_articolo="BOLLO"
        descrizione="art. 15 DPR 633/72"
        riferimento_ordine=""
        quantita="1"
        prezzo="2,00"
        sconti=""
        codice_iva="53"
        u_m="Nr"
        importo="2,00"
        fattura.add_row(codice_articolo,descrizione,riferimento_ordine,u_m,quantita,prezzo,sconti,importo,codice_iva)
        if not codice_iva in lista_codici_iva:
            lista_codici_iva[codice_iva] = 2
        else:
            lista_codici_iva[codice_iva] +=2
    # VAT-exemption wording, one invoice row per comma-separated sentence.
    if scritta_esenzione:
        fattura.add_row("","","","","","","","","")
        fattura.add_row("","","","","","","","","")
        scritte = scritta_esenzione_cliente.split(",")
        for scritta in scritte:
            fattura.add_row("",scritta,"","","","","","","")
    bollo_presente = False
    # One footer line per VAT code with its net amount and tax.
    for k,v in lista_codici_iva.iteritems():
        codice_iva = k
        importo_netto = v
        # print "LISTA CODICI : ",codice_iva,importo_netto
        dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
        percentuale_iva = dettaglio_iva.percentuale_iva
        descrizione_iva = dettaglio_iva.descrizione_codice_iva
        imposta_iva = return_imposta(v,percentuale_iva)
        if dettaglio_iva.bollo_su_importi_esenti is True:
            if not bollo_presente:
                bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
                bollo_presente = True
        fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),"")
    if bollo:
        _bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
        importo_totale += float(_bollo)
    # NOTE(review): imposta_iva here holds only the LAST VAT code's tax from
    # the loop above, while imposta_totale sums all of them -- confirm.
    importo_totale_da_salvare = importo_totale +imposta_iva
    if not "/" in pagamento:
        # Single due date: one fatture_salvate row.
        importo_totale = Money(str(importo_totale),"EUR")
        importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
        fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
        fattura.totale(str(ritorna_prezzo_europeo(importo_totale_da_salvare)))
        scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
        if "r.b." in pagamento.lower() or "riba" in pagamento.lower():
            riba=True
        else:
            riba=False
        db.fatture_salvate.insert(scadenza=scadenza,nome_cliente=nome_cliente,data_fattura = start_date,numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare,richiede_riba=riba,riba_emessa=False,user_id=auth.user_id)
    else:
        # Devo mettere due fatture con il pagamento e scadenza corretti
        # Split payment (e.g. "30/60"): two saved rows, half the total each,
        # with due dates spaced by the difference of the two terms.
        first_half = round(importo_totale_da_salvare / 2,2)
        second_half= importo_totale_da_salvare - first_half
        s=pagamento
        st = int(s[s.index("/")+1:s.index("/")+4]) - int(s[s.index("/")-3:s.index("/")])
        second_date = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
        first_date = second_date - datetime.timedelta(days = int(st) +1)
        if "F.M" in pagamento:
            pass
            first_date = first_date.strftime("%d/%m/%Y")
            # day_start,day_end = monthrange(first_date.year, first_date.month)
            # first_date = str(day_end)+"/"+str(first_date.month)+"/"+str(first_date.year)
        else:
            first_date = first_date.strftime("%d/%m/%Y")
        second_date = second_date.strftime("%d/%m/%Y")
        if "r.b." in pagamento.lower() or "riba" in pagamento.lower():
            riba=True
        else:
            riba=False
        first_date = datetime.datetime.strptime(first_date,"%d/%m/%Y")
        second_date = datetime.datetime.strptime(second_date,"%d/%m/%Y")
        importo_totale = Money(str(importo_totale),"EUR")
        importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
        fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
        fattura.totale(str(ritorna_prezzo_europeo(importo_totale_da_salvare)))
        db.fatture_salvate.insert(scadenza=first_date,nome_cliente=nome_cliente,data_fattura = start_date,numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = first_half,richiede_riba=riba,riba_emessa=False,user_id=auth.user_id)
        db.fatture_salvate.insert(scadenza=second_date,nome_cliente=nome_cliente,data_fattura = start_date,numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = second_half,richiede_riba=riba,riba_emessa=False,user_id=auth.user_id)
    # print "SCADENZA {0}".format(scadenza)
    """
    fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
    fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
    fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
    fattura.totale("14567645")
    """
    fattura.add_row("","","","","","","","","")
    fattura.add_row("",annotazioni,"","","","","","","")
    fattura.insert_rows()
    fattura.create_pdf()
    # Advance the invoice counter and clear the user's DDT queue.
    db(db.fattura).delete()
    db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
    db(db.ddt_da_fatturare.user_id == auth.user_id).delete()
def return_scadenza(fattura_id):
    """Recompute the due date (dd/mm/yyyy string) for a saved invoice by
    replaying the payment-terms logic over its DDT lines.

    NOTE(review): this is a trimmed copy of the logic in crea_fattura; it
    references `id_cliente`, which is NOT defined in this function -- if
    `pagamento` comes back None the lookup below raises NameError (silently
    swallowed by the outer except). Also the reference date is hard-coded
    to 28/02/2017. Both look like defects to confirm with the author.
    """
    ddts = db(db.fatture_salvate.id == fattura_id).select().first()["id_ddt"]
    # id_ddt is stored as the repr of a Python list; eval() turns it back.
    ddts_list = eval(ddts)
    scadenza=""
    start_date = datetime.datetime.strptime("28/02/2017","%d/%m/%Y")
    for ddt in ddts_list:
        rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id ==ddt).select()
        # print "DDT ID : ",ddt
        for row in rows:
            """
            <Row {'n_riga': '3', 'prezzo': '8.9919', 'saved_ddt_id': '21', 'quantita': '11', 'evasione': datetime.datetime(2017, 1, 31, 8, 56), 'id': 10L, 'codice_articolo': '892069925', 'codice_iva': 'Iva 22%', 'descrizione': 'FLANGIA', 'sconti': None, 'u_m': 'Nr', 'user_id': '1', 'codice_ordine': '1/17', 'id_ordine': '26', 'riferimento_ordine': 'fdsfsdf'}>
            """
            """
            La riga del ddt contiene i dati relativi all'ordine (id_ordine)
            siccome il pagamento può essere modificato bisogna risalire all'ordine
            poi al tipo di pagamento, poi ai giorni e calcolare la data
            """
            id_ordine = row.id_ordine
            try:
                try:
                    pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
                    # print "pagamento = ",pagamento
                except:
                    pagamento = None
                if pagamento is None:
                    # NOTE(review): `id_cliente` is undefined here (NameError).
                    pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
                if "F.M." in pagamento:
                    fine_mese = True
                else:
                    fine_mese = False
                if not fine_mese:
                    try:
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        scadenza_salvata = scadenza
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    except:
                        response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
                        return locals()
                else:
                    if "M.S." in pagamento:
                        # End of month, then 10 extra days into the next month.
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                        scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
                        scadenza = scadenza.date() + datetime.timedelta(days = 10)
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    else:
                        # Plain end-of-month terms.
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
            except Exception,e:
                # print e
                pass
    return scadenza
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_preview(args):
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
# print "qui"
"""
Dati cliente
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
iban_cliente = dati_cliente.codice_iban
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
annotazioni=dati_cliente.annotazioni
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
for r in ddts_id:
data_scelta = r.data_emissione
m = datetime.datetime.strptime(data_scelta,"%d/%m/%Y").date()
# print "MESE : "+str(m.month)
day_start,day_end = monthrange(m.year, m.month)
d = str(day_end)+"/"+str(m.month)+"/"+str(m.year)
start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
print "-- DATE CHECK --"
print start_date
fattura = FATTURA("FATTURA DIFFERITA",start_date.strftime("%d/%m/%Y"),numero_fattura_da_salvare,anteprima=True)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
# print "IBAN : ",iban_cliente
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(iban_cliente),"PAGAMENTO","SCADENZA")
except:
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
for ddt_id in ddts_id:
lista_ddt.append(ddt_id.ddt_id)
riferimento_ddt = "Rif. DDT : " + ddt_id.numero_ddt + " del " + ddt_id.data_emissione
fattura.add_row("",riferimento_ddt,"","","","","","","")
print ddt_id
rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id.ddt_id).select()
print "PAst creation ---##"
# print "DDT ID : ",ddt_id.ddt_id
scritta_esenzione = False
for row in rows:
print row
"""
<Row {'n_riga': '3', 'prezzo': '8.9919', 'saved_ddt_id': '21', 'quantita': '11', 'evasione': datetime.datetime(2017, 1, 31, 8, 56), 'id': 10L, 'codice_articolo': '892069925', 'codice_iva': 'Iva 22%', 'descrizione': 'FLANGIA', 'sconti': None, 'u_m': 'Nr', 'user_id': '1', 'codice_ordine': '1/17', 'id_ordine': '26', 'riferimento_ordine': 'fdsfsdf'}>
"""
"""
La riga del ddt contiene i dati relativi all'ordine (id_ordine)
siccome il pagamento può essere modificato bisogna risalire all'ordine
poi al tipo di pagamento, poi ai giorni e calcolare la data
"""
if not "commento" in row.codice_articolo:
id_ordine = row.id_ordine
try:
try:
pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
# print "pagamento = ",pagamento
except:
pagamento = None
if pagamento is None:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(iban_cliente),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
# print "Aggiunta rig"
sconti = row.sconti
if row.sconti is None:
sconti=""
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente " + riferimento_ddt + " Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.quantita)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo + " Qta : "
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.quantita) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
# print "VALLLLE " + row.codice_iva
descrizione_codice_iva = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine, db.righe_in_ordine_cliente.n_riga==row.n_riga).select().first()["codice_iva"]
codice_iva=db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["codice_iva"]
row.codice_iva=codice_iva
# print "Nuovo codice iva : "+row.codice_iva
if "Esenzione" in descrizione_codice_iva:
scritta_esenzione = True
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["percentuale_iva"]
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
"""
Passo il commento ma resetto tutti i campi
"""
# print row
row.riferimento_ordine=""
row.u_m=""
row.quantita=""
prezzo=""
sconti=""
importo=""
codice_iva=""
row.codice_articolo=""
# row.descrizione=row.commento
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.quantita,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
bollo= dati_cliente.bollo
if bollo:
print "SONO NEL BOLLO"
codice_articolo="BOLLO"
descrizione="art. 15 DPR 633/72"
riferimento_ordine=""
quantita="1"
prezzo="2,00"
sconti=""
codice_iva="53"
u_m="Nr"
importo="2,00"
fattura.add_row(codice_articolo,descrizione,riferimento_ordine,u_m,quantita,prezzo,sconti,importo,codice_iva)
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = 2
else:
lista_codici_iva[codice_iva] +=2
if scritta_esenzione:
fattura.add_row("","","","","","","","","")
fattura.add_row("","","","","","","","","")
scritte = scritta_esenzione_cliente.split(",")
for scritta in scritte:
fattura.add_row("",scritta,"","","","","","","")
bollo_presente = False
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),"")
if bollo:
_bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(_bollo)
importo_totale_da_salvare = importo_totale +imposta_iva
# print "Imposta iva {0}".format(imposta_iva)
# print "Importo calcolato {0}".format(importo_totale_da_salvare)
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(ritorna_prezzo_europeo(importo_totale_da_salvare)))
# db.fatture_salvate.insert(scadenza=scadenza_salvata,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
# print "SCADENZA {0}".format(scadenza)
"""
fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
# db(db.fattura).delete()
# db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_preview_istantanea(args):
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
"""
Dati cliente
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
annotazioni=dati_cliente.annotazioni
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
bollo= dati_cliente.bollo
if bollo:
db(db.righe_in_fattura_istantanea.codice_articolo=="BOLLO").delete()
db.righe_in_fattura_istantanea.insert(
codice_articolo="BOLLO",
descrizione="art. 15 DPR 633/72",
riferimento_ordine="",
qta="1",
prezzo="2",
sconti="",
codice_iva="Esenzione Iva",
commento=""
)
scritta_esenzione = False
# print "1"
# print dettagli_banca
# print "2"
start_date = datetime.datetime.now()
fattura = FATTURA("FATTURA IMMEDIATA",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare,anteprima=True)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica cliente"
return locals()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
scritta_esenzione = False
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
pass
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
sconti = row.sconti
if row.sconti is None:
sconti=""
if len(row.codice_articolo) > 0 and not 'commento' in row.codice_articolo:
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.qta)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.qta) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
descrizione_codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["descrizione_codice_iva"]
if "Esenzione" in descrizione_codice_iva:
scritta_esenzione = True
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
row.u_m,row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","","",""
row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","",""
row.descrizione=row.commento
row.u_m=""
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
if scritta_esenzione:
fattura.add_row("","","","","","","","","")
fattura.add_row("","","","","","","","","")
scritte = scritta_esenzione_cliente.split(",")
for scritta in scritte:
fattura.add_row("",scritta,"","","","","","","")
scadenza=""
bollo_presente = False
bollo = 0
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),"")
bollo = 0
"""
if bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(bollo)
"""
importo_totale_da_salvare = importo_totale +imposta_iva
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(importo_totale_da_salvare))
# db.fatture_salvate.insert(scadenza=scadenza_salvata,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
# print "SCADENZA {0}".format(scadenza)
"""
fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
# db(db.fattura).delete()
# db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_preview_istantanea_accredito(args):
# print "In preview instantanea accredito"
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
"""
Dati cliente
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
annotazioni=dati_cliente.annotazioni
# print "1"
# print dettagli_banca
# print "2"
start_date = datetime.datetime.now()
fattura = FATTURA("NOTA DI ACCREDITO",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare,anteprima=True)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica cliente"
return locals()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
pass
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
sconti = row.sconti
if row.sconti is None:
sconti=""
if len(row.codice_articolo) > 0 and not 'commento' in row.codice_articolo:
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.qta)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.qta) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","",""
row.descrizione=row.commento
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
scadenza=""
bollo_presente = False
bollo = 0
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
bollo = 0
if bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(bollo)
importo_totale_da_salvare = importo_totale +imposta_iva
# print "Importo totale "+str(importo_totale_da_salvare)
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(importo_totale_da_salvare))
# db.fatture_salvate.insert(scadenza=scadenza_salvata,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
# print "SCADENZA {0}".format(scadenza)
"""
fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
# db(db.fattura).delete()
# db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_istantanea(args):
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
"""
Dati cliente
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
annotazioni=dati_cliente.annotazioni
bollo= dati_cliente.bollo
if bollo:
db(db.righe_in_fattura_istantanea.codice_articolo=="BOLLO").delete()
db.righe_in_fattura_istantanea.insert(
codice_articolo="BOLLO",
descrizione="art. 15 DPR 633/72",
riferimento_ordine="",
qta="1",
prezzo="2",
sconti="",
codice_iva="Esenzione Iva",
commento=""
)
scritta_esenzione = False
# print "1"
# print dettagli_banca
# print "2"
start_date = datetime.datetime.now()
fattura = FATTURA("FATTURA IMMEDIATA",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica cliente"
return locals()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
pass
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
sconti = row.sconti
if row.sconti is None:
sconti=""
if len(row.codice_articolo) > 0 and 'commento' not in row.codice_articolo:
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.qta)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo
response.flash=msg
# print "!QWUEIQWEUQWUE"
return locals()
pass
importo = saved_importo = float(row.qta) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
descrizione_codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["descrizione_codice_iva"]
if "Esenzione" in descrizione_codice_iva:
scritta_esenzione = True
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
row.u_m,row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","","",""
row.descrizione=row.commento
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
if scritta_esenzione:
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
fattura.add_row("","","","","","","","","")
fattura.add_row("","","","","","","","","")
scritte = scritta_esenzione_cliente.split(",")
for scritta in scritte:
fattura.add_row("",scritta,"","","","","","","")
# print lista_codici_iva
bollo_presente = False
bollo = 0
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
bollo = 0
"""
if bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(bollo)
"""
importo_totale_da_salvare = importo_totale +imposta_iva
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(importo_totale_da_salvare))
lista_ddt=[] #Fattura senza ddt = istantanea
db.fatture_salvate.insert(scadenza=scadenza,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
# print "SCADENZA {0}".format(scadenza)
"""
fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
db(db.fattura).delete()
db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_istantanea_accredito(args):
    """Build and save a credit note ("NOTA DI ACCREDITO") PDF for a customer.

    args['0'] holds the customer id. Invoice rows come from the
    righe_in_fattura_istantanea table; the invoice counter (db.fattura,
    stored as "<number>/<year>") is incremented, the PDF is produced via
    the FATTURA helper and the result recorded in db.fatture_salvate.
    Returns locals() (web2py view contract) on validation errors.

    NOTE(review): indentation was reconstructed during review -- confirm
    nesting against the original file where ambiguous.
    """
    id_cliente=args['0']
    # print "ID CLIENTE : ",id_cliente
    # Next invoice number: "<number>/<year>".
    numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
    numero = int(numero_corrente_fattura.split("/")[0])
    anno = int(numero_corrente_fattura.split("/")[1])
    numero +=1
    numero_fattura_da_salvare = str(numero)+"/"+str(anno)
    """
    Dati cliente
    """
    dati_cliente = db(db.clienti.id == id_cliente).select().first()
    nome_cliente=dati_cliente.nome
    citta_cliente = dati_cliente.citta
    indirizzo_cliente = dati_cliente.indirizzo
    cap_cliente = dati_cliente.cap
    provincia_cliente = dati_cliente.provincia
    cf_cliente = dati_cliente.codice_fiscale
    pi_cliente = dati_cliente.partita_iva
    nazione_cliente = dati_cliente.nazione
    codice_banca = dati_cliente.codice_banca
    dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
    annotazioni=dati_cliente.annotazioni
    # print "1"
    # print dettagli_banca
    # print "2"
    start_date = datetime.datetime.now()
    fattura = FATTURA("NOTA DI ACCREDITO",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare)
    fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
    try:
        fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
    except Exception,e:
        # print e
        response.flash="Controllare il tipo di pagamento in anagrafica cliente"
        return locals()
    fattura.rows=[]
    lista_codici_iva = {}
    importo_totale = 0
    imposta_totale = 0
    imposta_iva = 0
    lista_ddt = []
    if True:
        rows = db(db.righe_in_fattura_istantanea).select()
        for row in rows:
            try:
                # Work out the due date (scadenza) from the customer's payment
                # terms; "F.M." means end-of-month payment.
                pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
                if "F.M." in pagamento:
                    fine_mese = True
                else:
                    fine_mese = False
                if not fine_mese:
                    try:
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        scadenza_salvata = scadenza
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    except:
                        response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
                        return locals()
                else:
                    # NOTE(review): ("M.S." or "ms") evaluates to "M.S." only,
                    # so the "ms" alternative is never actually checked.
                    if ("M.S." or "ms") in pagamento:
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
                        # Roll to end of month, then add the next-month offset.
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                        scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
                        scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    else:
                        # End of month without next-month ("M.S.") offset.
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                    pass
                fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
            except Exception,e:
                # print e
                response.flash="Controllare il tipo di pagamento in anagrafica"
                return locals()
            sconti = row.sconti
            if row.sconti is None:
                sconti=""
            if len(row.codice_articolo) > 0 and not 'commento' in row.codice_articolo:
                # Validate price and quantity before computing the line amount.
                try:
                    if row.prezzo == "0":
                        row.prezzo = ""
                    f = float(row.prezzo)
                    # print "SONO QUI : PREZZO = ".format(f)
                except:
                    msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
                    response.flash=msg
                    return locals()
                try:
                    f=float(row.qta)
                except:
                    msg = "Quantità non valida Cod.Art : " + row.codice_articolo
                    response.flash=msg
                    return locals()
                pass
                importo = saved_importo = float(row.qta) * float(row.prezzo)
                importo = Money(str(importo),"EUR")
                importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
                prezzo = str(row.prezzo).replace(".",",")
                codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
                percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
                importo_totale +=saved_importo
                imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
                # Accumulate net amount per VAT code for the footer lines.
                if not codice_iva in lista_codici_iva:
                    lista_codici_iva[codice_iva] = saved_importo
                else:
                    lista_codici_iva[codice_iva] += saved_importo
            else:
                # Comment row: blank every column and show the comment text.
                row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","",""
                row.descrizione=row.commento
                row.u_m=""
            fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
    # print lista_codici_iva
    bollo_presente = False
    bollo = 0
    # One footer line per VAT code; stamp duty (bollo) is printed only once.
    for k,v in lista_codici_iva.iteritems():
        codice_iva = k
        importo_netto = v
        # print "LISTA CODICI : ",codice_iva,importo_netto
        dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
        percentuale_iva = dettaglio_iva.percentuale_iva
        descrizione_iva = dettaglio_iva.descrizione_codice_iva
        imposta_iva = return_imposta(v,percentuale_iva)
        if dettaglio_iva.bollo_su_importi_esenti is True:
            if not bollo_presente:
                bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
                bollo_presente = True
        fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
        bollo = 0
    if bollo_presente:
        bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
        importo_totale += float(bollo)
    # NOTE(review): only the LAST iteration's imposta_iva is added here --
    # confirm this is the intended total for multi-VAT-code notes.
    importo_totale_da_salvare = importo_totale +imposta_iva
    importo_totale = Money(str(importo_totale),"EUR")
    importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
    fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
    fattura.totale(str(importo_totale_da_salvare))
    lista_ddt=[] # Invoice without DDT = instantaneous
    db.fatture_salvate.insert(scadenza=scadenza,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
    # print "SCADENZA {0}".format(scadenza)
    """
    fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
    fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
    fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
    fattura.totale("14567645")
    """
    fattura.add_row("","","","","","","","","")
    fattura.add_row("",annotazioni,"","","","","","","")
    fattura.insert_rows()
    fattura.create_pdf()
    # Persist the incremented invoice counter.
    db(db.fattura).delete()
    db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
def ritorna_righe_in_ddt(id_ddt):
    """Return the article codes (each newline-terminated) of the saved
    DDT-client rows belonging to *id_ddt*."""
    saved_rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == id_ddt).select()
    return [saved_row.codice_articolo + "\n" for saved_row in saved_rows]
def del_saved_rows(table, row_id):
    """Delete every saved DDT-client row linked to *row_id*.

    *table* is unused; the signature matches SQLFORM.grid's ondelete
    callback contract. Always returns "ok".
    """
    query = db.saved_righe_in_ddt_cliente.saved_ddt_id == row_id
    db(query).delete()
    return "ok"
def del_ddt_clienti():
    """Grid page for deleting client DDTs.

    Adds a virtual 'righe' column listing the saved article codes of each
    DDT; deleting a grid row also removes its saved rows via
    del_saved_rows. Returns locals() for the web2py view.
    """
    # Local names ('fields', 'form') are kept because locals() is the view
    # contract.
    db.ddt_cliente.righe = Field.Virtual(
        "righe",
        lambda row: ritorna_righe_in_ddt(row.ddt_cliente.id))
    fields = [
        db.ddt_cliente.nome_cliente,
        db.ddt_cliente.data_richiesta,
        db.ddt_cliente.numero_ddt,
        db.ddt_cliente.righe,
    ]
    form = SQLFORM.grid(
        db.ddt_cliente,
        formname='del',
        maxtextlength=100,
        create=False,
        editable=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=False,
        fields=fields,
        ondelete=del_saved_rows)
    return locals()
def controllo_errori():
    """Run data-consistency checks and show the resulting errors grid.

    Clears db.errori, then scans customers, suppliers, banks, saved DDTs,
    client orders and articles, inserting one db.errori row per anomaly.
    Also normalises some article records as a side effect (missing
    giacenza -> 0, missing trattamento -> "Si") and refreshes each
    order's ddt_completato flag. Returns locals() for the view.

    NOTE(review): indentation was reconstructed during review -- confirm
    nesting against the original file where ambiguous.
    """
    db(db.errori).delete()
    # --- Customers: bank, city and payment terms must be present/valid ---
    clienti = db(db.clienti).select()
    for cliente in clienti:
        if cliente.codice_banca is None or len(cliente.codice_banca)<1:
            errore = "Codice banca assente per il cliente {0}".format(cliente.nome)
            db.errori.insert(tipo_errore = errore)
        else:
            banca_cliente =cliente.codice_banca
            dati_banca_cliente = db(db.anagrafica_banche.descrizione == banca_cliente).select().first()
            if dati_banca_cliente is None:
                errore = "Banca non in anagrafica per il cliente {0}".format(cliente.nome)
                db.errori.insert(tipo_errore = errore)
        if cliente.citta is None or len(cliente.citta)<1:
            errore = "Città assente per il cliente {0}".format(cliente.nome)
            db.errori.insert(tipo_errore = errore)
        if cliente.pagamento is None or len(cliente.pagamento)<1:
            errore = "Pagamento assente per il cliente {0}".format(cliente.nome)
            db.errori.insert(tipo_errore = errore)
    # --- Suppliers (the loop reuses the 'clienti'/'cliente' names) ---
    clienti = db(db.fornitori).select()
    for cliente in clienti:
        if cliente.codice_banca is None or len(cliente.codice_banca)<1:
            errore = "Codice banca assente per il fornitore {0}".format(cliente.nome)
            db.errori.insert(tipo_errore = errore)
        if cliente.citta is None or len(cliente.citta)<1:
            errore = "Città assente per il fornitore {0}".format(cliente.nome)
            db.errori.insert(tipo_errore = errore)
        if cliente.pagamento is None or len(cliente.pagamento)<1:
            errore = "Pagamento assente per il fornitore {0}".format(cliente.nome)
            db.errori.insert(tipo_errore = errore)
    # --- Banks: ABI and CAB codes must be exactly 5 characters ---
    clienti = db(db.anagrafica_banche).select()
    for cliente in clienti:
        if cliente.codice_abi is None or len(cliente.codice_abi)!=5:
            errore = "Lunghezza codice ABI non corretta per la banca {0}".format(cliente.descrizione)
            db.errori.insert(tipo_errore = errore)
        if cliente.codice_cab is None or len(cliente.codice_cab)!=5:
            errore = "Lunghezza codice CAB non corretta per la banca {0}".format(cliente.descrizione)
            db.errori.insert(tipo_errore = errore)
    """
    if cliente.domicilio is None or len(cliente.domicilio)<1:
        errore = "Domicilio assente per il fornitore {0}".format(cliente.nome)
        db.errori.insert(tipo_errore = errore)
    """
    # --- Duplicate DDT numbers (GROUP BY ... HAVING count > 1) ---
    count = db.saved_ddt.numero_ddt.count()
    ddts = db().select(db.saved_ddt.numero_ddt,groupby = db.saved_ddt.numero_ddt, having=count > 1)
    for ddt in ddts:
        errore = "DDT duplicato numero {0} del {1} per il cliente {2}".format(ddt.numero_ddt,ritorna_data_inserimento(ddt.numero_ddt),ritorna_cliente_da_numero_ddt(ddt.numero_ddt))
        db.errori.insert(tipo_errore = errore)
    # --- Client orders referencing unknown payment codes ---
    pagamenti = db(db.ordine_cliente).select()
    for pagamento in pagamenti:
        if db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento.pagamento).isempty():
            if pagamento.pagamento is None:
                errore = "Pagamento non esistente per ordine cliente {0}. Verrà usato il pagamento associato al cliente".format(pagamento.ultimo_codice_ordine)
            else:
                errore = "Pagamento '{0}' ordine cliente {1} non esistente in anagrafica pagamenti".format(pagamento.pagamento,pagamento.ultimo_codice_ordine)
            db.errori.insert(tipo_errore = errore)
    # --- Orphan saved DDTs (no matching ddt_cliente record) ---
    ddts=db(db.saved_ddt).select()
    for ddt in ddts:
        if db(db.ddt_cliente.id ==ddt.saved_ddt_id).isempty():
            # db(db.saved_ddt.id == ddt.id).delete()
            # NOTE(review): the delete above is commented out, so the message
            # below reports a deletion that no longer happens.
            errore = "Cancellato ddt orfano salvato {0}".format(ddt.id)
            db.errori.insert(tipo_errore = errore)
    # --- Orders without rows / completion-flag refresh ---
    ordini=db(db.ordine_cliente).select()
    for ordine in ordini:
        if db(db.righe_in_ordine_cliente.id_ordine_cliente == ordine.id).isempty():
            # db(db.saved_ddt.id == ddt.id).delete()
            errore = "Ordine cliente {0} senza righe associate".format(ordine.ultimo_codice_ordine)
            db.errori.insert(tipo_errore = errore)
        if tutte_le_righe_completate_in_ordine_id(ordine.id):
            # print "ORDINE ID : ",ordine.id
            ordine.update_record(ddt_completato='T')
        else:
            ordine.update_record(ddt_completato='F')
    # --- Articles: stock and VAT sanity checks (with normalisation) ---
    articoli=db(db.anagrafica_articoli).select()
    for articolo in articoli:
        # articolo.update_record(tipo_articolo="Prodotto finito",tipo_ordine="Ordine acquisto",codice_sottoconto="8820125")
        if articolo.giacenza is None:
            errore = "Articolo {0} senza giacenza".format(articolo.codice_articolo)
            db.errori.insert(tipo_errore = errore)
            articolo.update_record(giacenza=0)
        try:
            if int(articolo.giacenza) < 0:
                errore = "Articolo {0} con giacenza negativa".format(articolo.codice_articolo)
                db.errori.insert(tipo_errore = errore)
                # articolo.update_record(giacenza=0)
        except:
            # Non-numeric stock value: report and reset to zero.
            errore = "Articolo {0} con giacenza in errore".format(articolo.codice_articolo)
            db.errori.insert(tipo_errore = errore)
            articolo.update_record(giacenza=0)
            pass
        if articolo.codice_iva is None:
            errore = "Articolo {0} senza iva".format(articolo.codice_articolo)
            db.errori.insert(tipo_errore = errore)
            # articolo.update_record(giacenza=0)
        if articolo.trattamento is None:
            errore = "Articolo {0} senza trattamento".format(articolo.codice_articolo)
            db.errori.insert(tipo_errore = errore)
            articolo.update_record(trattamento="Si")
        if articolo.giacenza == "5000":
            errore = "Articolo {0} senza giacenza".format(articolo.codice_articolo)
            db.errori.insert(tipo_errore = errore)
            articolo.update_record(giacenza=0)
    # NOTE(review): db(...).select() returns a Rows object, never None, so
    # this check can never fire -- isempty() was probably intended.
    anagrafica_banche_azienda = db(db.anagrafica_banche_azienda).select()
    if anagrafica_banche_azienda is None:
        errore = "INSERIRE ANAGRAFICA NOSTRA BANCA PER RIBA"
        db.errori.insert(tipo_errore = errore)
    """
    per rimuovere il "|" dai ddt fattura
    Commentare una volta eseguita questa routine!!
    fatture = db(db.fatture_salvate).select()
    for fattura in fatture:
        saved_date = fattura.scadenza
        data_fattura = fattura.data_fattura
        if "|" in fattura.id_ddt:
            # print "ok"
            lista_ddt = fattura.id_ddt.split("|")
            lista_ddt = filter(None,lista_ddt)
            # print lista_ddt
            fattura.data_fattura=datetime.datetime.strptime("12/01/1979","%d/%m/%Y")
            fattura.update_record(id_ddt=str(lista_ddt))
            # print fattura
            # db(db.fatture_salvate.id==fattura.id).update(id_ddt=lista_ddt)
        if saved_date is None:
            # print "Scadenza trovata = {0} ".format(return_scadenza(fattura.id))
            fattura.update_record(scadenza=datetime.datetime.strptime(return_scadenza(fattura.id),"%d/%m/%Y"))
        # print data_fattura
        if fattura.id <= 100:
            fattura.update_record(data_fattura=datetime.datetime.strptime("28/02/2017","%d/%m/%Y"))
            # print fattura.id
        pagamento,scadenza = ritorna_tipo_pagamento_da_fattura(fattura.id)
        # print "si"
        if "R.B." in pagamento:
            fattura.update_record(richiede_riba='T')
        else:
            fattura.update_record(richiede_riba='F')
    """
    """
    rows=db(db.saved_righe_in_ddt_cliente).select()
    for row in rows:
        count_ddt = db(db.ddt_cliente.id == row.saved_ddt_id).count()
        if count == 0:
            errore = "Trovata riga non associata a ddt : id_riga = {0}".format(row.id)
            db.errori.insert(tipo_errore = errore)
            db(db.saved_righe_in_ddt_cliente.id == row.id).delete()
    """
    form = SQLFORM.grid(db.errori,maxtextlength=500,editable=False,deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=False)
    return locals()
def ritorna_data_inserimento(ddt_id):
    """Return the insertion date (formatted dd/mm/YYYY) of the saved DDT
    whose numero_ddt equals *ddt_id*."""
    record = db(db.saved_ddt.numero_ddt == ddt_id).select().first()
    raw_timestamp = record["data_inserimento"]
    # Only the leading "YYYY-MM-DD" part of the stored value is relevant.
    parsed = datetime.datetime.strptime(raw_timestamp[:10], "%Y-%m-%d").date()
    return parsed.strftime("%d/%m/%Y")
def ritorna_cliente_da_numero_ddt(ddt_id):
    """Return the customer name for DDT number *ddt_id*, or "NON ASSEGNATO"
    when no matching record exists.

    Bug fix: the previous code did ``select()["nome_cliente"]`` -- string
    indexing on the Rows object returned by select(), which always raised
    and fell through to the except branch, so the real customer name was
    never returned. ``.first()`` extracts the row before reading the
    column; a missing record still falls back via the except branch.
    """
    # ddt_id = db(db.saved_ddt.numero_ddt == ddt_id).select().first()["id"]
    # print ddt_id
    try:
        nome_cliente = db(db.ddt_cliente.numero_ddt == ddt_id).select().first()["nome_cliente"]
    except:
        nome_cliente = "NON ASSEGNATO"
    return nome_cliente
@service.jsonrpc
@service.jsonrpc2
def insert_ddt_preview(*args):
    """Render a PREVIEW PDF of a client DDT without persisting anything.

    Positional args: id_ddt, consegna, trasporto, ditta, domicilio,
    aspetto, colli, porto, annotazioni, peso, causale, data_scelta.
    All the record updates/inserts performed by insert_ddt are commented
    out here; only the PDF is produced (DDT(..., anteprima=True)).
    Returns "ok" on success, locals() on error.

    NOTE(review): indentation was reconstructed during review -- confirm
    nesting against the original file where ambiguous.
    """
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    data_scelta = args[11]
    ddt_id = db(db.ddt_cliente.id == id_ddt).select().first()
    id_cliente = ddt_id.id_cliente
    nome_cliente = ddt_id.nome_cliente
    row = db(db.clienti.id==id_cliente).select().first()
    # consegna arrives as a comma-separated string; fall back to a blank
    # "same as header" template when it cannot be split.
    try:
        consegna = consegna.split(",")
    except:
        consegna = "Come intestazione ,,,,,,".split(",")
    """
    Insert into saved ddt table
    """
    # Compute (but do not persist) what the next DDT number would be.
    numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
    n = numero_ddt_salvato.split("/")[0]
    a = numero_ddt_salvato.split("/")[1]
    new_n = str(int(n) + 1)
    numero_ddt_corrente = new_n + "/" + a
    # ddt_id.update_record(numero_ddt=numero_ddt_corrente)
    # db.saved_ddt.insert(numero_ddt = numero_ddt_corrente,saved_ddt_id = ddt_id.id, data_inserimento = datetime.datetime.now(), user_id = auth.user_id)
    # row2 = db(db.ddt).select().first()
    # row2.update_record(numero_ddt = numero_ddt_corrente)
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    pa = DDT(d,numero_ddt_corrente,"Cliente",anteprima=True)
    # print "DDT CORRENTE : ",numero_ddt_corrente
    pa.rows=[]
    # p.intestazione("LEONARDO SPA", "ROMA","PIAZZA MONTE GRAPPA 4", "00195", "RM", "IT", "123456", "00881841001")
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    # p.consegna("LEONARDO SPA", "CAMPI BISENZIO", "VIA ALBERT EINSTEIN 35", "50013", "FI")
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        pa.consegna("null","null","null","null","null")
    # p.info_trasporto("Vettore", "TNT GLOBAL EXPRESS SPA", "VENDITA","29/11/16", "LODI", "28/11/16")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    # p.footer("scatola su bancale","100","ASSEGNATO","NOTE","123")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    rows = db(db.righe_in_ddt_cliente.user_id == auth.user_id).select()
    # tutte_le_righe_completate = True
    try:
        for row in rows:
            id_ordine = row["id_ordine"]
            codice_articolo = row["codice_articolo"]
            codice_ordine = row["codice_ordine"]
            if "commento" not in codice_articolo:
                quantita = row['quantita_prodotta']
                prezzo = row['prezzo']
                riferimento_ordine = row["riferimento_ordine"]+" - POS."+row["n_riga"]
                n_riga = row["n_riga"]
                codice_iva = row["codice_iva"]
                evasione = row["evasione"]
                id_riga_ordine = row["id_riga_ordine"]
                # Quantity already produced for this order row, if any.
                q = db(db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine).select().first()
                if q is not None:
                    try:
                        quantita_richiesta = int(row["quantita_richiesta"])
                        quantita_prodotta = int(row["quantita_prodotta"])
                        quantita_prodotta_fino_ad_ora = 0
                        quantita_prodotta_fino_ad_ora = int(q.quantita_prodotta) + quantita_prodotta
                        # r = db(db.produzione_righe_per_ddt.id_riga_ordine == str(id_riga_ordine)).select().first()
                        # r.update_record(quantita_prodotta=str(quantita_prodotta_fino_ad_ora))
                    except Exception,e:
                        response.flash="Controlla le quantità"
                        return "ok"
                        # print e
                else:
                    """
                    E' la prima volta che inserisco la riga della quantità
                    """
                    # print "E' la prima volta che inserisco la riga della quantita"
                    quantita_prodotta_fino_ad_ora = int(row["quantita_prodotta"])
                    quantita_prodotta = int(row["quantita_prodotta"])
                    quantita_richiesta = int(row["quantita_richiesta"])
                    # db.produzione_righe_per_ddt.insert(id_riga_ordine = id_riga_ordine,quantita_prodotta = quantita_prodotta)
                # Row-closing side effects are disabled in the preview.
                if quantita_prodotta_fino_ad_ora >= int(quantita_richiesta):
                    # print "Chiudo la riga"
                    # to_update = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
                    # to_update.update_record(riga_emessa_in_ddt = True)
                    pass
                else:
                    # tutte_le_righe_completate = Fals
                    pass
                # print "SONO QUII"
                # print "{0}".format(tutte_le_righe_completate)
                quantita_totale_prodotta = int(quantita_prodotta) + int(quantita_prodotta_fino_ad_ora)
            # print "CODICE ARTICOLO : ",codice_articolo
            if len(codice_articolo)>0:
                if "commento" not in codice_articolo:
                    descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
                else:
                    # Comment row: blank columns and show the order comment.
                    d = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine).select().first()["commento"]
                    descrizione = d
                    row.codice_articolo=" "
                    n_riga=" "
                    riferimento_ordine=" "
                    quantita_prodotta=0
                    prezzo=" "
                    evasione=" "
                    row["u_m"]=" "
                pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],str(row.quantita_prodotta))
                # db.saved_righe_in_ddt_cliente.insert(saved_ddt_id = ddt_id.id,id_ordine = id_ordine,codice_ordine = codice_ordine, n_riga = n_riga,codice_articolo=codice_articolo,descrizione=descrizione,riferimento_ordine=row["riferimento_ordine"],u_m=row["u_m"],quantita=quantita_prodotta,prezzo=prezzo,evasione=evasione,user_id = auth.user_id,codice_iva=row["codice_iva"])
            else:
                descrizione =row.descrizione
                pa.add_row(row.codice_articolo,descrizione,"","","")
    except Exception,e:
        response.flash="Errore inserimento ddt {0}".format(e)
        return locals()
    # print row
    # p.insert_rows()
    pa.insert_rows()
    # print pa.rows
    pa.create_pdf()
    # print request.folder
    # redirect(URL('ddt_clienti'))
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def insert_ddt(*args):
    """Finalise a client DDT: assign the next DDT number, persist the rows,
    update stock/order bookkeeping and render the PDF.

    Positional args: id_ddt, consegna, trasporto, ditta, domicilio,
    aspetto, colli, porto, annotazioni, peso, causale, data_scelta.
    Redirects to 'ddt_clienti' on success; returns "ok"/locals() on the
    handled error paths.

    NOTE(review): indentation was reconstructed during review -- confirm
    nesting against the original file where ambiguous.
    """
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    data_scelta = args[11]
    # print consegna
    ddt_id = db(db.ddt_cliente.id == id_ddt).select().first()
    ddt_id.update_record(porto=porto,aspetto=aspetto,peso=peso,annotazioni=annotazioni,trasporto_a_mezzo=trasporto,causale_del_trasporto=causale,inizio_del_trasporto="",ditta_vettore=ditta,domicilio_vettore=domicilio,data_e_ora_del_ritiro="",user_id = auth.user_id,consegna=str(consegna))
    # print "Aggiornato"
    # return locals()
    id_cliente = ddt_id.id_cliente
    nome_cliente = ddt_id.nome_cliente
    row = db(db.clienti.id==id_cliente).select().first()
    # consegna arrives as a comma-separated string; fall back to a blank
    # "same as header" template when it cannot be split.
    try:
        consegna = consegna.split(",")
    except:
        consegna = "Come intestazione ,,,,,,".split(",")
    """
    Insert into saved ddt table
    """
    # Next DDT number is kept as "<number>/<year>" in db.ddt.
    numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
    n = numero_ddt_salvato.split("/")[0]
    a = numero_ddt_salvato.split("/")[1]
    new_n = str(int(n) + 1)
    numero_ddt_corrente = new_n + "/" + a
    ddt_id.update_record(numero_ddt=numero_ddt_corrente)
    db.saved_ddt.insert(numero_ddt = numero_ddt_corrente,saved_ddt_id = ddt_id.id, data_inserimento = datetime.datetime.now(), user_id = auth.user_id)
    row2 = db(db.ddt).select().first()
    row2.update_record(numero_ddt = numero_ddt_corrente)
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    pa = DDT(d,numero_ddt_corrente,"Cliente")
    # print "DDT CORRENTE : ",numero_ddt_corrente
    pa.rows=[]
    # p.intestazione("LEONARDO SPA", "ROMA","PIAZZA MONTE GRAPPA 4", "00195", "RM", "IT", "123456", "00881841001")
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    # p.consegna("LEONARDO SPA", "CAMPI BISENZIO", "VIA ALBERT EINSTEIN 35", "50013", "FI")
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        pa.consegna("null","null","null","null","null")
    # p.info_trasporto("Vettore", "TNT GLOBAL EXPRESS SPA", "VENDITA","29/11/16", "LODI", "28/11/16")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    # p.footer("scatola su bancale","100","ASSEGNATO","NOTE","123")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    rows = db(db.righe_in_ddt_cliente.user_id == auth.user_id).select()
    # tutte_le_righe_completate = True
    try:
        for row in rows:
            id_ordine = row["id_ordine"]
            codice_articolo = row["codice_articolo"]
            codice_ordine = row["codice_ordine"]
            if "commento" in codice_articolo:
                id_riga_ordine = row["id_riga_ordine"]
                evasione = row["evasione"]
                n_riga = row["n_riga"]
            elif "commento" not in codice_articolo:
                quantita = row['quantita_prodotta']
                prezzo = row['prezzo']
                riferimento_ordine = row["riferimento_ordine"]+" - POS."+row["n_riga"]
                n_riga = row["n_riga"]
                codice_iva = row["codice_iva"]
                evasione = row["evasione"]
                id_riga_ordine = row["id_riga_ordine"]
                # Quantity already produced for this order row, if any.
                q = db(db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine).select().first()
                if q is not None:
                    try:
                        quantita_richiesta = int(row["quantita_richiesta"])
                        quantita_prodotta = int(row["quantita_prodotta"])
                        quantita_prodotta_fino_ad_ora = 0
                        quantita_prodotta_fino_ad_ora = int(q.quantita_prodotta) + quantita_prodotta
                        r = db(db.produzione_righe_per_ddt.id_riga_ordine == str(id_riga_ordine)).select().first()
                        r.update_record(quantita_prodotta=str(quantita_prodotta_fino_ad_ora))
                        # Close the order row once the requested quantity is reached.
                        if quantita_prodotta_fino_ad_ora >= int(quantita_richiesta):
                            # print "Chiudo la riga"
                            to_update = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
                            to_update.update_record(riga_emessa_in_ddt = True)
                            db(db.riserva_quantita.id_riga_ordine==id_riga_ordine).delete()
                    except Exception,e:
                        response.flash="Controlla le quantità"
                        return "ok"
                        # print e
                else:
                    """
                    E' la prima volta che inserisco la riga della quantità
                    """
                    # print "E' la prima volta che inserisco la riga della quantita"
                    quantita_prodotta_fino_ad_ora = int(row["quantita_prodotta"])
                    quantita_prodotta = int(row["quantita_prodotta"])
                    quantita_richiesta = int(row["quantita_richiesta"])
                    db.produzione_righe_per_ddt.insert(id_riga_ordine = id_riga_ordine,quantita_prodotta = quantita_prodotta)
                    if quantita_prodotta_fino_ad_ora >= int(quantita_richiesta):
                        # print "Chiudo la riga"
                        to_update = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
                        to_update.update_record(riga_emessa_in_ddt = True)
                        # db(db.riserva_quantita.id_riga_ordine==id_riga_ordine).delete()
                    else:
                        # tutte_le_righe_completate = Fals
                        pass
                    # print "SONO QUII"
                # print "{0}".format(tutte_le_righe_completate)
                quantita_totale_prodotta = int(quantita_prodotta) + int(quantita_prodotta_fino_ad_ora)
            # print "CODICE ARTICOLO : ",codice_articolo
            if len(codice_articolo)>0:
                if "commento" not in codice_articolo:
                    descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
                    # Negative reservation frees the article booking, then the
                    # stock is decreased.
                    da_rimuovere = int(quantita_prodotta) * -1
                    db.riserva_quantita.insert(codice_articolo = row.codice_articolo,quantita = da_rimuovere,id_riga_ordine = id_riga_ordine,user_id=auth.user_id)
                    rimuovi_giacenza(codice_articolo,row.quantita_prodotta)
                    """Metto negativo per liberare la prenotazione articolo"""
                else:
                    # Comment row: blank columns and show the order comment.
                    d = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine).select().first()["commento"]
                    descrizione = d
                    row.codice_articolo=" "
                    # n_riga=" "
                    riferimento_ordine=" "
                    quantita_prodotta=0
                    prezzo=" "
                    evasione=datetime.datetime.now()
                    row["u_m"]=" "
                pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],str(row.quantita_prodotta))
                db.saved_righe_in_ddt_cliente.insert(id_riga_ordine=id_riga_ordine,saved_ddt_id = ddt_id.id,id_ordine = id_ordine,codice_ordine = codice_ordine, n_riga = n_riga,codice_articolo=codice_articolo,descrizione=descrizione,riferimento_ordine=row["riferimento_ordine"],u_m=row["u_m"],quantita=quantita_prodotta,prezzo=prezzo,evasione=evasione,user_id = auth.user_id,codice_iva=row["codice_iva"])
            else:
                descrizione =row.descrizione
                pa.add_row(row.codice_articolo,descrizione,"","","")
                # print descrizione
        """
        if tutte_le_righe_completate:
            ordine = db(db.ordine_cliente.id == id_ordine).select().first()
            ordine.update_record(ddt_completato = True)
        """
        # Mark the whole order complete when every row has been emitted.
        if tutte_le_righe_completate():
            ordine = db(db.ordine_cliente.id == id_ordine).select().first()
            ordine.update_record(ddt_completato = True)
    except Exception,e:
        response.flash="Errore inserimento ddt {0}".format(e)
        return locals()
    # print row
    # p.insert_rows()
    pa.insert_rows()
    # print pa.rows
    pa.create_pdf()
    # print request.folder
    redirect(URL('ddt_clienti'))
    return "ok"
def rimuovi_giacenza(codice_articolo,quantita_prodotta):
    """Decrease the stock (giacenza) of *codice_articolo* by
    *quantita_prodotta*; the value is stored back as a string."""
    articolo = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first()
    nuova_giacenza = int(articolo.giacenza) - int(quantita_prodotta)
    articolo.update_record(giacenza=str(nuova_giacenza))
def manutenzione_righe_ordini_clienti():
    """Maintenance page: default SQLFORM grid over the client-order rows
    table. Returns locals() so the view can render 'form'."""
    form = SQLFORM.grid(db.righe_in_ordine_cliente)
    return locals()
@service.jsonrpc
@service.jsonrpc2
def insert_mod_ddt(*args):
    """Re-issue (modify) an already-numbered client DDT.

    Moves the previously saved rows to an undo table, removes the old
    production records, rebuilds the saved rows from the current editing
    table (righe_in_ddt_cliente), fixes stock/reservations accordingly
    and regenerates the PDF. Positional args: id_ddt, consegna,
    trasporto, ditta, domicilio, aspetto, colli, porto, annotazioni,
    peso, causale (no data_scelta: today's date is always used).
    Redirects to 'ddt_clienti' on success.

    NOTE(review): indentation was reconstructed during review -- confirm
    nesting against the original file where ambiguous.
    """
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    # print "Consegna ",consegna
    ddt_id = db(db.ddt_cliente.id == id_ddt).select().first()
    # print ddt_id
    ddt_id.update_record(porto=porto,aspetto=aspetto,peso=peso,annotazioni=annotazioni,trasporto_a_mezzo=trasporto,causale_del_trasporto=causale,inizio_del_trasporto="",ditta_vettore=ditta,domicilio_vettore=domicilio,data_e_ora_del_ritiro="",user_id = auth.user_id,consegna=consegna)
    # print "CIAOOOO ",ddt_id
    # return locals()
    id_cliente = ddt_id.id_cliente
    nome_cliente = ddt_id.nome_cliente
    row = db(db.clienti.id==id_cliente).select().first()
    # consegna arrives as a comma-separated string; fall back to a blank
    # "same as header" template when it cannot be split.
    try:
        consegna = consegna.split(",")
    except:
        consegna = "Come intestazione ,,,,,,".split(",")
    """
    Insert into saved ddt table
    """
    # The DDT keeps its already-assigned number on modification.
    numero_ddt_corrente = ddt_id.numero_ddt
    # print numero_ddt_corrente
    # db.saved_ddt.insert(numero_ddt = numero_ddt_corrente,saved_ddt_id = ddt_id.id, data_inserimento = datetime.datetime.now(), user_id = auth.user_id)
    data_scelta=""
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    pa = DDT(d,numero_ddt_corrente,"Cliente")
    # print "DDT CORRENTE : ",numero_ddt_corrente
    pa.rows=[]
    # p.intestazione("LEONARDO SPA", "ROMA","PIAZZA MONTE GRAPPA 4", "00195", "RM", "IT", "123456", "00881841001")
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    # p.consegna("LEONARDO SPA", "CAMPI BISENZIO", "VIA ALBERT EINSTEIN 35", "50013", "FI")
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        pa.consegna("null","null","null","null","null")
    # p.info_trasporto("Vettore", "TNT GLOBAL EXPRESS SPA", "VENDITA","29/11/16", "LODI", "28/11/16")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    # p.footer("scatola su bancale","100","ASSEGNATO","NOTE","123")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    # print "ciao ",ddt_id
    """
    1) salvare le righe del ddt in una tabella per creare UNDO
    2) cancellare i riferimenti a saved_righe_in_ddt_cliente
    3) inserire le righe ddt as usual
    """
    produzione_da_rimuovere=0
    old_rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id ==ddt_id.id).select()
    # print old_rows
    # Move the previously saved rows to the undo table and drop the matching
    # production records so they can be rebuilt below.
    for r in old_rows:
        # print old_rows
        db.saved_righe_in_ddt_cliente_undo.insert(**db.saved_righe_in_ddt_cliente._filter_fields(r))
        db(db.saved_righe_in_ddt_cliente.id == r.id).delete()
        produzione_da_rimuovere = r.quantita
        """
        Ritornare id riga ordine anche se NULL
        """
        # Resolve the order-row id even when it was not stored on the row.
        if r.id_riga_ordine is None or len(r.id_riga_ordine)<1:
            id_riga_ordine=db((db.righe_in_ordine_cliente.id_ordine_cliente == r.id_ordine) & (db.righe_in_ordine_cliente.n_riga ==r.n_riga)).select().first()["id"]
        else:
            id_riga_ordine = r.id_riga_ordine
        db((db.produzione_righe_per_ddt.quantita_prodotta == produzione_da_rimuovere) & (db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine)).delete()
    # return ""
    # tutte_le_righe_completate = True
    rows = db(db.righe_in_ddt_cliente.user_id == auth.user_id).select()
    db(db.saved_righe_in_ddt_cliente.saved_ddt_id ==ddt_id.id).delete()
    try:
        for row in rows:
            id_ordine = row["id_ordine"]
            codice_articolo = row["codice_articolo"]
            codice_ordine = row["codice_ordine"]
            # Resolve the order-row id even when it is missing on the row.
            if row.id_riga_ordine is None or len(row.id_riga_ordine)<1:
                id_riga_ordine=db((db.righe_in_ordine_cliente.id_ordine_cliente == row.id_ordine) & (db.righe_in_ordine_cliente.n_riga ==row.n_riga)).select().first()["id"]
            else:
                id_riga_ordine = row.id_riga_ordine
            # print "ID RIGA ORDINE ",id_riga_ordine
            if "commento" not in codice_articolo:
                quantita = row['quantita_prodotta']
                prezzo = row['prezzo']
                riferimento_ordine = row["riferimento_ordine"]+" - POS."+row["n_riga"]
                n_riga = row["n_riga"]
                codice_iva = row["codice_iva"]
                evasione = row["evasione"]
                # id_riga_ordine = row["id_riga_ordine"]
                # print id_riga_ordine
                """
                q = db(db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine).select().first()
                # print "Quantita trovata già prodotta : ",q
                if q is not None:
                    try:
                        quantita_richiesta = int(row["quantita_richiesta"])
                        quantita_prodotta = int(row["quantita_prodotta"])
                        quantita_prodotta_fino_ad_ora = 0
                        quantita_prodotta_fino_ad_ora = quantita_prodotta
                        r = db(db.produzione_righe_per_ddt.id_riga_ordine == str(id_riga_ordine)).select().first()
                        r.update_record(quantita_prodotta=str(quantita_prodotta_fino_ad_ora))
                    except Exception,e:
                        response.flash="Controlla le quantità"
                        # print e
                        return "ok"
                else:
                """
                if True:
                    """
                    E' la prima volta che inserisco la riga della quantità
                    """
                    # print "E' la prima volta che inserisco la riga della quantita"
                    quantita_prodotta_fino_ad_ora = int(row["quantita_prodotta"])
                    quantita_prodotta = int(row["quantita_prodotta"])
                    quantita_richiesta = int(row["quantita_richiesta"])
                    db.produzione_righe_per_ddt.insert(id_riga_ordine = id_riga_ordine,quantita_prodotta = quantita_prodotta)
                    # print "qui"
                if quantita_prodotta_fino_ad_ora >= int(quantita_richiesta):
                    # print "Chiudo la riga"
                    to_update = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
                    to_update.update_record(riga_emessa_in_ddt = 'T')
                    # NOTE(review): missing call parentheses/arguments -- this
                    # line is a no-op attribute access, not an insert.
                    db.riserva_quantita.insert
                    rimuovi_giacenza(codice_articolo,row.quantita_prodotta)
                    """Metto negativo per liberare la prenotazione articolo"""
                else:
                    # Re-open the order row and rebuild its reservations.
                    # print "Riapro la riga"
                    # print "ID RIGA ORDINE : ",id_riga_ordine
                    to_update = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
                    # print to_update.id
                    to_update.update_record(riga_emessa_in_ddt = 'F')
                    da_rimuovere = int(quantita_prodotta_fino_ad_ora) * -1
                    db.riserva_quantita.insert(codice_articolo = row.codice_articolo,quantita = da_rimuovere,id_riga_ordine = id_riga_ordine,user_id=auth.user_id)
                    db.riserva_quantita.insert(codice_articolo = row.codice_articolo,quantita = quantita_prodotta_fino_ad_ora,id_riga_ordine = id_riga_ordine,user_id=auth.user_id)
                    # Restore the stock removed by the previous issue, then
                    # subtract the new produced quantity.
                    giacenza = int(produzione_da_rimuovere)
                    # print "produzione da rimuovere = ",giacenza
                    vecchia_giacenza = int(db(db.anagrafica_articoli.codice_articolo ==codice_articolo ).select().first()["giacenza"])
                    # print "vecchia giacenza ",vecchia_giacenza
                    nuova_giacenza = vecchia_giacenza - giacenza
                    # print "nuova giacenza ",nuova_giacenza
                    nuova_giacenza += int(quantita_prodotta_fino_ad_ora)
                    # print "nuova giacenza 2 ",nuova_giacenza
                    g = db(db.anagrafica_articoli.codice_articolo ==codice_articolo).select().first()
                    g.update_record(giacenza = str(nuova_giacenza))
                quantita_totale_prodotta = int(quantita_prodotta) + int(quantita_prodotta_fino_ad_ora)
            # print "CODICE ARTICOLO : ",codice_articolo
            if len(codice_articolo)>0:
                if "commento" not in codice_articolo:
                    descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
                else:
                    # Comment row: blank columns and show the order comment.
                    d = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine).select().first()["commento"]
                    descrizione = d
                    row.codice_articolo=" "
                    n_riga=" "
                    riferimento_ordine=" "
                    quantita_prodotta=0
                    prezzo=" "
                    evasione=datetime.datetime.now()
                    row["u_m"]=" "
                pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],str(row.quantita_prodotta))
                # NOTE(review): this insert stores row.id_riga_ordine (possibly
                # empty) rather than the resolved id_riga_ordine computed above
                # -- confirm which one is intended.
                db.saved_righe_in_ddt_cliente.insert(id_riga_ordine=row.id_riga_ordine,saved_ddt_id = ddt_id.id,id_ordine = id_ordine,codice_ordine = codice_ordine, n_riga = n_riga,codice_articolo=codice_articolo,descrizione=descrizione,riferimento_ordine=row["riferimento_ordine"],u_m=row["u_m"],quantita=quantita_prodotta,prezzo=prezzo,evasione=evasione,user_id = auth.user_id,codice_iva=row["codice_iva"])
            else:
                descrizione =row.descrizione
                pa.add_row(row.codice_articolo,descrizione,"","","")
                # print descrizione
        """
        if tutte_le_righe_completate:
            ordine = db(db.ordine_cliente.id == id_ordine).select().first()
            ordine.update_record(ddt_completato = True)
        """
        # Refresh the order's completion flag.
        if tutte_le_righe_completate():
            ordine = db(db.ordine_cliente.id == id_ordine).select().first()
            ordine.update_record(ddt_completato = True)
        else:
            ordine = db(db.ordine_cliente.id == id_ordine).select().first()
            ordine.update_record(ddt_completato = False)
    except Exception,e:
        response.flash="Errore inserimento ddt {0}".format(e)
        return locals()
    # print row
    # p.insert_rows()
    pa.insert_rows()
    # print pa.rows
    pa.create_pdf()
    # print request.folder
    redirect(URL('ddt_clienti'))
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def insert_mod_ddt_preview(*args):
    """JSON-RPC: build a PDF *preview* of a modified customer DDT.

    Positional args: 0 id_ddt, 1 consegna (comma-separated delivery address),
    2 trasporto, 3 ditta, 4 domicilio, 5 aspetto, 6 colli, 7 porto,
    8 annotazioni, 9 peso, 10 causale.

    Unlike the definitive insert, this keeps the current DDT number (no
    counter bump) and does not archive rows into saved_righe_in_ddt_cliente.
    NOTE(review): it still inserts into produzione_righe_per_ddt the first
    time a quantity is seen for a row -- confirm that side effect is
    intended for a preview.
    """
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    ddt_id = db(db.ddt_cliente.id == id_ddt).select().first()
    id_cliente = ddt_id.id_cliente
    nome_cliente = ddt_id.nome_cliente
    row = db(db.clienti.id==id_cliente).select().first()
    # the delivery address arrives as a single comma-separated string
    try:
        consegna = consegna.split(",")
    except:
        consegna = "Come intestazione ,,,,,,".split(",")
    """
    Insert into saved ddt table
    """
    # preview: reuse the DDT's existing number, do not advance the counter
    numero_ddt_corrente = ddt_id.numero_ddt
    data_scelta =""
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    pa = DDT(d,numero_ddt_corrente,"Cliente",anteprima=True)
    pa.rows=[]
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        # malformed/short address: render placeholder fields
        pa.consegna("null","null","null","null","null")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    """
    1) salvare le righe del ddt in una tabella per creare UNDO
    2) cancellare i riferimenti a saved_righe_in_ddt_cliente
    3) inserire le righe ddt as usual
    """
    # the current user's working rows built by add_row_to_ddt(_mod)
    rows = db(db.righe_in_ddt_cliente.user_id == auth.user_id).select()
    try:
        for row in rows:
            id_ordine = row["id_ordine"]
            codice_articolo = row["codice_articolo"]
            codice_ordine = row["codice_ordine"]
            if "commento" not in codice_articolo:
                quantita = row['quantita_prodotta']
                prezzo = row['prezzo']
                riferimento_ordine = row["riferimento_ordine"]+" - POS."+row["n_riga"]
                n_riga = row["n_riga"]
                codice_iva = row["codice_iva"]
                evasione = row["evasione"]
                id_riga_ordine = row["id_riga_ordine"]
                # what was already produced for this order line in earlier DDTs
                q = db(db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine).select().first()
                if q is not None:
                    try:
                        quantita_richiesta = int(row["quantita_richiesta"])
                        quantita_prodotta = int(row["quantita_prodotta"])
                        quantita_prodotta_fino_ad_ora = 0
                        quantita_prodotta_fino_ad_ora = int(q.quantita_prodotta) + quantita_prodotta
                    except Exception,e:
                        # non-numeric quantity entered by the user
                        response.flash="Controlla le quantità"
                        return "ok"
                else:
                    """
                    E' la prima volta che inserisco la riga della quantità
                    """
                    quantita_prodotta_fino_ad_ora = int(row["quantita_prodotta"])
                    quantita_prodotta = int(row["quantita_prodotta"])
                    quantita_richiesta = int(row["quantita_richiesta"])
                    # NOTE(review): a preview still records production here -- confirm intended
                    db.produzione_righe_per_ddt.insert(id_riga_ordine = id_riga_ordine,quantita_prodotta = quantita_prodotta)
                # both branches intentionally empty: the definitive insert
                # closes the order line here; the preview must not
                if quantita_prodotta_fino_ad_ora >= int(quantita_richiesta):
                    pass
                else:
                    pass
            # NOTE(review): for a comment row encountered first, the names
            # below are unbound; the outer except turns that into a flash
            quantita_totale_prodotta = int(quantita_prodotta) + int(quantita_prodotta_fino_ad_ora)
            if len(codice_articolo)>0:
                if "commento" not in codice_articolo:
                    descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
                else:
                    # comment pseudo-article: take its text from the order
                    # line and blank out every other column on the PDF
                    d = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine).select().first()["commento"]
                    descrizione = d
                    row.codice_articolo=" "
                    n_riga=" "
                    riferimento_ordine=" "
                    quantita_prodotta=0
                    prezzo=" "
                    evasione=" "
                    row["u_m"]=" "
                pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],str(row.quantita_prodotta))
            else:
                descrizione =row.descrizione
                pa.add_row(row.codice_articolo,descrizione,"","","")
        """
        if tutte_le_righe_completate:
            ordine = db(db.ordine_cliente.id == id_ordine).select().first()
            ordine.update_record(ddt_completato = True)
        """
    except Exception,e:
        response.flash="Errore inserimento ddt {0}".format(e)
        return locals()
    pa.insert_rows()
    pa.create_pdf()
    # redirect raises an HTTP exception, so the return below never runs
    redirect(URL('ddt_clienti'))
    return "ok"
def tutte_le_righe_completate():
    """Return False when any non-comment row in the current user's DDT
    working set maps to an order line not yet emitted in a DDT.

    Lookup failures are swallowed, so the function is optimistic: on error
    it reports whatever was established before the failure (True at worst).
    """
    completate = True
    try:
        for riga_ddt in db(db.righe_in_ddt_cliente.user_id == auth.user_id).select():
            # resolve the order-line id, falling back to (order, line-number)
            riga_id = riga_ddt.id_riga_ordine
            if riga_id is None or len(riga_id) < 1:
                riga_id = db((db.righe_in_ordine_cliente.id_ordine_cliente == riga_ddt.id_ordine) &
                             (db.righe_in_ordine_cliente.n_riga == riga_ddt.n_riga)).select().first()["id"]
            # comment pseudo-articles never block completion
            if "commento" in riga_ddt["codice_articolo"]:
                continue
            riga_ordine = db(db.righe_in_ordine_cliente.id == riga_id).select().first()
            if not riga_ordine.riga_emessa_in_ddt:
                completate = False
    except Exception:
        pass
    return completate
def riga_completata(id_riga_ordine):
    """Return the riga_emessa_in_ddt flag of the given customer-order line."""
    riga = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
    return riga.riga_emessa_in_ddt
def tutte_le_righe_completate_in_ordine_id(id_ordine):
    """Return False when any non-comment line of order *id_ordine* has not
    yet been emitted in a DDT; True otherwise (errors are swallowed)."""
    completate = True
    try:
        for riga in db(db.righe_in_ordine_cliente.id_ordine_cliente == id_ordine).select():
            # comment pseudo-articles never block completion
            if "commento" in riga["codice_articolo"]:
                continue
            if not riga.riga_emessa_in_ddt:
                completate = False
    except Exception:
        pass
    return completate
@service.jsonrpc
@service.jsonrpc2
def insert_ddt_fornitori(*args):
    """JSON-RPC: emit a definitive supplier DDT and render its PDF.

    Positional args: 0 id_ddt, 1 consegna (comma-separated address),
    2 trasporto, 3 ditta, 4 domicilio, 5 aspetto, 6 colli, 7 porto,
    8 annotazioni, 9 peso, 10 causale, 11 data_scelta ("" = today).

    Side effects: advances the global DDT counter (db.ddt), stamps the new
    number on the DDT, archives it in saved_ddt_fornitori, marks every
    touched supplier order as ddt_completato, archives each row in
    saved_righe_in_ddt_fornitore, then redirects to ddt_fornitori.
    """
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    data_scelta = args[11]
    # use the caller-chosen date when given, today otherwise
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    ddt_id = db(db.ddt_fornitore.id == id_ddt).select().first()
    ddt_id.update_record(porto=porto,aspetto=aspetto,peso=peso,annotazioni=annotazioni,trasporto_a_mezzo=trasporto,causale_del_trasporto=causale,inizio_del_trasporto="",ditta_vettore=ditta,domicilio_vettore=domicilio,data_e_ora_del_ritiro="",user_id = auth.user_id)
    id_fornitore = ddt_id.id_fornitore
    nome_fornitore = ddt_id.nome_fornitore
    row = db(db.fornitori.id==id_fornitore).select().first()
    consegna = consegna.split(",")
    """
    Insert into saved ddt table
    """
    # progressive number has the form "N/suffix": bump N, keep the suffix
    numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
    n = numero_ddt_salvato.split("/")[0]
    a = numero_ddt_salvato.split("/")[1]
    new_n = str(int(n) + 1)
    numero_ddt_corrente = new_n + "/" + a
    ddt_id.update_record(numero_ddt=numero_ddt_corrente)
    db.saved_ddt_fornitori.insert(numero_ddt = numero_ddt_corrente,saved_ddt_id = ddt_id.id, data_inserimento = d, user_id = auth.user_id)
    # persist the bumped counter back into the singleton db.ddt row
    row2 = db(db.ddt).select().first()
    row2.update_record(numero_ddt = numero_ddt_corrente)
    pa = DDT(d,numero_ddt_corrente,"Fornitore")
    pa.rows=[]
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        # malformed/short address: render placeholder fields
        pa.consegna("null","null","null","null","null")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    # the current user's working rows built by add_row_to_ddt_fornitori
    rows = db(db.righe_in_ddt_fornitore.user_id == auth.user_id).select()
    for row in rows:
        quantita = row['quantita']
        prezzo = row['prezzo']
        codice_articolo = row["codice_articolo"]
        riferimento_ordine = row["codice_ordine"]+" - POS."+row["n_riga"]
        id_ordine = row["id_ordine"]
        codice_ordine = row["codice_ordine"]
        n_riga = row["n_riga"]
        codice_iva = row["codice_iva"]
        evasione = row["evasione"]
        # supplier orders are flagged complete as soon as any row is emitted
        ordine=db(db.ordine_fornitore.id == id_ordine).select().first()
        ordine.update_record(ddt_completato = True)
        if len(codice_articolo)>0:
            if "commento" not in codice_articolo:
                descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
            else:
                # comment pseudo-article: blank the code/line-number columns
                descrizione = row.descrizione
                row.codice_articolo=""
                n_riga=""
            pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],row["quantita"])
            db.saved_righe_in_ddt_fornitore.insert(saved_ddt_id = ddt_id.id,id_ordine = id_ordine,codice_ordine = codice_ordine, n_riga = n_riga,codice_articolo=codice_articolo,descrizione=descrizione,riferimento_ordine=row["riferimento_ordine"],u_m=row["u_m"],quantita=quantita,prezzo=prezzo,evasione=evasione,user_id = auth.user_id,codice_iva=row["codice_iva"])
        else:
            descrizione =row.descrizione
            pa.add_row(row.codice_articolo,descrizione,"","","")
    pa.insert_rows()
    pa.create_pdf()
    # redirect raises an HTTP exception, so the return below never runs
    redirect(URL('ddt_fornitori'))
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def insert_ddt_fornitori_preview(*args):
    """JSON-RPC: render a PDF *preview* of a supplier DDT.

    Same argument layout as insert_ddt_fornitori.  It computes the would-be
    next DDT number but persists nothing: no counter update, no archive
    inserts, no order flag, and no redirect at the end.
    """
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    data_scelta = args[11]
    # use the caller-chosen date when given, today otherwise
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    ddt_id = db(db.ddt_fornitore.id == id_ddt).select().first()
    id_fornitore = ddt_id.id_fornitore
    nome_fornitore = ddt_id.nome_fornitore
    row = db(db.fornitori.id==id_fornitore).select().first()
    consegna = consegna.split(",")
    """
    Insert into saved ddt table
    """
    # compute (but do not persist) the next progressive number "N/suffix"
    numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
    n = numero_ddt_salvato.split("/")[0]
    a = numero_ddt_salvato.split("/")[1]
    new_n = str(int(n) + 1)
    numero_ddt_corrente = new_n + "/" + a
    row2 = db(db.ddt).select().first()
    pa = DDT(d,numero_ddt_corrente,"Fornitore",anteprima=True)
    pa.rows=[]
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        # malformed/short address: render placeholder fields
        pa.consegna("null","null","null","null","null")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    # the current user's working rows built by add_row_to_ddt_fornitori
    rows = db(db.righe_in_ddt_fornitore.user_id == auth.user_id).select()
    for row in rows:
        quantita = row['quantita']
        prezzo = row['prezzo']
        codice_articolo = row["codice_articolo"]
        riferimento_ordine = row["codice_ordine"]+" - POS."+row["n_riga"]
        id_ordine = row["id_ordine"]
        codice_ordine = row["codice_ordine"]
        n_riga = row["n_riga"]
        codice_iva = row["codice_iva"]
        evasione = row["evasione"]
        if len(codice_articolo)>0:
            if "commento" not in codice_articolo:
                descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
            else:
                # comment pseudo-article: blank the code/line-number columns
                descrizione = row.descrizione
                row.codice_articolo=""
                n_riga=""
            pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],row["quantita"])
        else:
            descrizione =row.descrizione
            pa.add_row(row.codice_articolo,descrizione,"","","")
    pa.insert_rows()
    pa.create_pdf()
    return "ok"
def fatture_per_riba():
    """Grid of the invoices currently selected for RiBa processing.

    Read-only except for row deletion; exposes `form` to the view via
    locals().
    """
    fields=[db.fatture_scelte.numero_fattura,db.fatture_scelte.totale]
    form = SQLFORM.grid(db.fatture_scelte,create=False,editable=False,deletable=True,csv=False,fields=fields)
    return locals()
@service.jsonrpc
@service.jsonrpc2
def aggiungi_fattura(args):
    """JSON-RPC: add a saved invoice to the current user's RiBa selection.

    args['0'] is the fatture_salvate id.  Any previous selection of the
    same invoice by this user is removed first, so the call is idempotent.
    """
    fattura_id = args['0']
    fattura = db(db.fatture_salvate.id == fattura_id).select().first()
    # drop any earlier selection of this invoice by the same user
    db((db.fatture_scelte.id_fattura == fattura_id) &
       (db.fatture_scelte.user_id == auth.user_id)).delete()
    db.fatture_scelte.insert(
        scadenza=fattura.scadenza,
        id_cliente=fattura.id_cliente,
        cliente=fattura.nome_cliente,
        id_fattura=fattura.id,
        numero_fattura=fattura.numero_fattura,
        totale=fattura.totale,
        user_id=auth.user_id,
    )
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def add_row_to_ddt(args):
    """JSON-RPC: (re)build the current user's DDT working rows for one order.

    args['0'] is the ordine_cliente id.  The user's existing working rows
    for this order are wiped, then one righe_in_ddt_cliente row is inserted
    per order line not yet emitted in a DDT.  Comment pseudo-articles are
    inserted with zeroed quantities/prices so they still show on the DDT.
    Returns "ok".
    """
    id_ordine = args['0']
    # start from a clean working set for this user/order pair
    db((db.righe_in_ddt_cliente.user_id == auth.user_id) &
       (db.righe_in_ddt_cliente.id_ordine == id_ordine)).delete()
    ordine = db(db.ordine_cliente.id == id_ordine).select().first()
    riferimento_ordine_cliente = ordine['riferimento_ordine_cliente']
    numero_ordine = ordine['ultimo_codice_ordine']
    # BUG FIX: the two conditions used to be passed as separate positional
    # arguments to db(...); the DAL treats the second positional as
    # ignore_common_filters, so the riga_emessa_in_ddt filter was silently
    # dropped.  They are now combined with &.
    rows = db((db.righe_in_ordine_cliente.id_ordine_cliente == id_ordine) &
              (db.righe_in_ordine_cliente.riga_emessa_in_ddt == 'F')).select()
    for row in rows:
        if "commento" in row.codice_articolo:
            # comment pseudo-article: carried over with zero quantities
            db.righe_in_ddt_cliente.insert(saldo=0, codice_ordine=numero_ordine,
                                           quantita_richiesta=0, quantita_prodotta=0,
                                           prezzo=0, sconti=0,
                                           codice_iva=row.codice_iva, evasione=row.evasione,
                                           user_id=auth.user_id,
                                           riferimento_ordine=riferimento_ordine_cliente,
                                           id_ordine=id_ordine, n_riga=row.n_riga,
                                           codice_articolo=row.codice_articolo,
                                           id_riga_ordine=row.id)
        elif not row.riga_emessa_in_ddt:
            # pre-fill "produced" with the quantity currently booked for the
            # line so the operator rarely needs to edit it; saldo is what is
            # still left to produce
            quantita_da_produrre = ritorna_totale_prenotazione_da_codice_articolo_e_riga_id(
                row.codice_articolo, row.id)
            saldo = ritorna_quantita_saldo(row.id)
            db.righe_in_ddt_cliente.insert(saldo=saldo, codice_ordine=numero_ordine,
                                           quantita_richiesta=row.quantita,
                                           quantita_prodotta=quantita_da_produrre,
                                           prezzo=row.prezzo, sconti=row.sconti,
                                           codice_iva=row.codice_iva, evasione=row.evasione,
                                           user_id=auth.user_id,
                                           riferimento_ordine=riferimento_ordine_cliente,
                                           id_ordine=id_ordine, n_riga=row.n_riga,
                                           codice_articolo=row.codice_articolo,
                                           id_riga_ordine=row.id)
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def add_row_to_ddt_mod(args):
    """JSON-RPC: rebuild the user's DDT working rows for one order (modify
    variant).

    args['0'] is the ordine_cliente id.  Unlike add_row_to_ddt, the
    "produced" column is pre-filled from produzione_righe_per_ddt (what was
    actually recorded) rather than from the booking total, and comment
    lines are skipped instead of inserted.
    """
    id_ordine = args['0']
    # start from a clean working set for this user/order pair
    db((db.righe_in_ddt_cliente.user_id == auth.user_id) & (db.righe_in_ddt_cliente.id_ordine == id_ordine)).delete()
    row = db(db.ordine_cliente.id == id_ordine).select().first()
    ultimo_codice_ordine = row['ultimo_codice_ordine']
    nome_cliente = row['nome_cliente']
    data_inserimento = row['data_inserimento']
    listino = row['listino']
    riferimento_ordine_cliente = row['riferimento_ordine_cliente']
    listino = row['listino']
    magazzino_interno = row['magazzino_interno']
    numero_ordine = row['ultimo_codice_ordine']
    rows = db(db.righe_in_ordine_cliente.id_ordine_cliente == id_ordine).select()
    quantita_prodotta=0
    row_id=0
    for row in rows:
        if "commento" in row.codice_articolo:
            # comment line: nothing to produce, nothing inserted here
            quantita_da_produrre = prenotato = quantita_prodotta = saldo = 0
            pass
        elif not row.riga_emessa_in_ddt:
            """
            Vado a vedere la quantità attualmente prodotta salvata nella tabella "produzione_righe_per_ddt"
            """
            row_id = row.id
            dettagli_produzione_riga = db(db.produzione_righe_per_ddt.id_riga_ordine == row.id).select().first()
            if dettagli_produzione_riga is not None:
                """
                Se ho trovato la riga vuol dire che è stata immessa una quantità in saldo.
                Vado a recuperare la quantità prodotta
                """
                quantita_da_produrre= int(row.quantita) - int(dettagli_produzione_riga.quantita_prodotta)
                quantita_prodotta = dettagli_produzione_riga.quantita_prodotta
            else:
                """
                Metto la quantita prodotta = alla quantita richiesta per velocizzare l'inserimento
                row.quantita è l'iniziale quantita richiesta nell'ordine
                """
                quantita_da_produrre = 0
                quantita_prodotta = 0
            # guard against a missing/empty requested quantity on the line
            quantita = 0
            if row.quantita:
                quantita = row.quantita
            db.righe_in_ddt_cliente.insert(saldo=ritorna_quantita_saldo(row_id),codice_ordine=numero_ordine,quantita_richiesta=quantita,quantita_prodotta = quantita_prodotta, prezzo=row.prezzo,sconti=row.sconti,codice_iva=row.codice_iva,evasione=row.evasione,user_id=auth.user_id,riferimento_ordine=riferimento_ordine_cliente,id_ordine=id_ordine,n_riga=row.n_riga,codice_articolo=row.codice_articolo,id_riga_ordine=row.id)
    return "ok"
def ritorna_quantita_richiesta_da_riga_salvata(id_riga_salvata):
    """Return the quantity originally requested on a customer-order line.

    Falls back to 0 when the row is missing or the lookup fails, matching
    the defensive style of the surrounding controllers.

    BUG FIX: the old except handler assigned to `riga_salvata`, which is
    unbound when the lookup itself raised (NameError), and a `first()`
    returning None would have raised AttributeError on `.quantita`.
    """
    try:
        riga_salvata = db(db.righe_in_ordine_cliente.id == id_riga_salvata).select().first()
        if riga_salvata is not None:
            return riga_salvata.quantita
    except Exception:
        pass
    return 0
@service.jsonrpc
@service.jsonrpc2
def add_row_to_ddt_fornitori(args):
    """JSON-RPC: rebuild the user's supplier-DDT working rows for one order.

    args['0'] is the ordine_fornitore id.  Existing working rows for this
    user/order pair are wiped, then one righe_in_ddt_fornitore row is
    inserted per order line not yet emitted in a DDT.  Returns "ok".
    """
    id_ordine = args['0']
    # start from a clean working set for this user/order pair
    db((db.righe_in_ddt_fornitore.user_id == auth.user_id) &
       (db.righe_in_ddt_fornitore.id_ordine == id_ordine)).delete()
    ordine = db(db.ordine_fornitore.id == id_ordine).select().first()
    numero_ordine = ordine['ultimo_codice_ordine']
    # historical placeholder: supplier orders carry no order reference here
    riferimento_ordine_fornitore = ""
    # BUG FIX: the two conditions used to be passed as separate positional
    # arguments to db(...); the DAL treats the second positional as
    # ignore_common_filters, so the riga_emessa_in_ddt filter was silently
    # dropped.  They are now combined with &.
    rows = db((db.righe_in_ordine_fornitore.id_ordine_fornitore == id_ordine) &
              (db.righe_in_ordine_fornitore.riga_emessa_in_ddt == 'F')).select()
    for row in rows:
        if not row.riga_emessa_in_ddt:
            db.righe_in_ddt_fornitore.insert(codice_ordine=numero_ordine, quantita=row.quantita,
                                             prezzo=row.prezzo, sconti=row.sconti,
                                             codice_iva=row.codice_iva, evasione=row.evasione,
                                             user_id=auth.user_id,
                                             riferimento_ordine=riferimento_ordine_fornitore,
                                             id_ordine=id_ordine, n_riga=row.n_riga,
                                             codice_articolo=row.codice_articolo,
                                             descrizione=row.commento)
    return "ok"
def return_fatture_in_scadenza():
    """Grid of saved invoices (fatture_salvate) due up to the end of the
    requested month.

    The month comes from request.vars['m'] (1-12); it defaults to the
    current month when absent or non-numeric.  Each row gets a button that
    calls the client-side aggiungiFatturaAEffetti() helper.
    """
    try:
        month = int(request.vars['m'])
    except (KeyError, TypeError, ValueError):
        # 'm' missing (web2py vars yield None) or not an integer
        month = datetime.datetime.now().month
    year = datetime.datetime.now().year
    # calendar.monthrange returns (weekday_of_first_day, number_of_days);
    # only the day count is needed to build the month-end cutoff
    day_end = monthrange(year, month)[1]
    end_date = datetime.datetime(year, month, day_end)
    fields = [db.fatture_salvate.nome_cliente, db.fatture_salvate.numero_fattura,
              db.fatture_salvate.scadenza, db.fatture_salvate.totale]
    links = [lambda row: BUTTON("Aggiungi fattura",
                                _onclick=XML('aggiungiFatturaAEffetti(' + str(row.id) + ')'),
                                _class='button btn btn-default')]
    form = SQLFORM.grid(db.fatture_salvate.scadenza <= end_date, user_signature=True,
                        args=request.args[:1], create=False, editable=True,
                        deletable=False, links=links, fields=fields, csv=False)
    return dict(form=form)
def return_scadenziario():
    """Rebuild and show the delivery schedule (scadenziario) for one month.

    The month comes from request.vars['m']; months earlier than the current
    one are interpreted as belonging to next year.  The db.scadenziario
    table is wiped and repopulated from open customer-order lines joined
    with the article registry, then rendered as a read-only grid.
    """
    try:
        month = int(request.vars['m'])
    except:
        month = datetime.datetime.now().month
    year = int(datetime.datetime.now().year)
    # a month already past refers to next year's schedule
    if datetime.datetime.now().month > month:
        year = year +1
    # monthrange returns (weekday_of_first_day, number_of_days);
    # day_start is immediately overridden to 1
    day_start,day_end = monthrange(year, month)
    day_start = 1
    st = str(day_start)+"/"+str(month)+"/"+str(year)
    start_date = datetime.datetime(year,month,day_start)
    end_date = datetime.datetime(year,month,day_end)
    # the table is a scratch area: always rebuilt from scratch
    db(db.scadenziario).delete()
    rows = db((db.righe_in_ordine_cliente.evasione >=start_date) & (db.righe_in_ordine_cliente.evasione <=end_date) & (db.righe_in_ordine_cliente.riga_emessa_in_ddt == 'F') & (db.righe_in_ordine_cliente.codice_articolo == db.anagrafica_articoli.codice_articolo) & (db.righe_in_ordine_cliente.id_ordine_cliente == db.ordine_cliente.id)).select(orderby = db.righe_in_ordine_cliente.evasione)
    for row in rows:
        # quantity still to produce = ordered minus already recorded
        quantita_prodotta_fino_ad_ora = 0
        q = db(db.produzione_righe_per_ddt.id_riga_ordine == row.righe_in_ordine_cliente.id).select().first()
        if q is not None:
            quantita_prodotta_fino_ad_ora = int(q.quantita_prodotta)
            quantita_da_produrre = int(row.righe_in_ordine_cliente.quantita) - quantita_prodotta_fino_ad_ora
        else:
            quantita_da_produrre = row.righe_in_ordine_cliente.quantita
        row.quantita_da_produrre = quantita_da_produrre
        try:
            # residual value formatted as an Italian-locale EUR amount
            prezzo = float(quantita_da_produrre) * float(row.righe_in_ordine_cliente.prezzo)
            prezzo = Money(str(prezzo),"EUR")
            prezzo = prezzo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
            """
            prezzo=0
            """
        except:
            prezzo="Null"
        if "commento" not in row.righe_in_ordine_cliente.codice_articolo:
            # NOTE(review): when q is None quantita_da_produrre may be a
            # string; under Python 2 'str > int' is always True -- confirm
            if quantita_da_produrre >0:
                db.scadenziario.insert(data_consegna = row.righe_in_ordine_cliente.evasione,cliente= row.ordine_cliente.nome_cliente,riferimento_ordine=row.ordine_cliente.riferimento_ordine_cliente,codice_ordine = row.ordine_cliente.ultimo_codice_ordine,codice_articolo = row.anagrafica_articoli.codice_articolo,descrizione = row.anagrafica_articoli.descrizione,qta_ordine = row.righe_in_ordine_cliente.quantita,qta_saldo = quantita_da_produrre,prezzo=prezzo,id_riga=row.righe_in_ordine_cliente.id)
    db.scadenziario.id.readable = False
    form = SQLFORM.grid(db.scadenziario,user_signature=True,args=request.args[:1],create=False,editable=False,deletable=False)
    return dict(form=form)
def ritorna_quantita_saldo(id_riga_ordine_cliente):
    """Quantity still to be produced for a customer-order line, as a string.

    saldo = ordered quantity minus whatever produzione_righe_per_ddt has
    recorded as already produced; a missing order line counts as 0 ordered,
    a missing production record as 0 produced.
    """
    produzione = db(db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine_cliente).select().first()
    riga = db(db.righe_in_ordine_cliente.id == id_riga_ordine_cliente).select().first()
    ordinata = int(riga.quantita) if riga else 0
    if produzione is not None:
        saldo = ordinata - int(produzione.quantita_prodotta)
    else:
        saldo = ordinata
    return str(saldo)
def articoli_in_produzione():
    """Read-only grid of articles currently in production, one print-RCP
    link per row (handled client-side by stampaRcp())."""
    db.articoli_in_produzione.id.readable = False
    links = [lambda row: A(XML('Stampa RCP'), _class='button btn btn-default',
                           _onClick=XML('stampaRcp(' + str(row.id) + ')'))]
    grid = SQLFORM.grid(db.articoli_in_produzione, create=False, editable=False,
                        deletable=False, maxtextlength=100, paginate=10, links=links)
    return dict(form=grid)
def articoli_in_produzione_cron():
    """Cron-style task: rebuild db.articoli_in_produzione from scratch.

    Scans every open (not yet emitted) customer-order line joined with the
    article registry, computes the residual quantity and value, and inserts
    one articoli_in_produzione row per line still to produce.
    """
    def ritorna_dettaglio_cliente(id_ordine,ordini_clienti):
        # linear scan of the pre-fetched orders instead of one query per row
        for ordine_cliente in ordini_clienti:
            if str(ordine_cliente.id) == str(id_ordine):
                return ordine_cliente
        return None
    # the table is a scratch area: always rebuilt from scratch
    db(db.articoli_in_produzione).delete()
    rows=db((db.righe_in_ordine_cliente.riga_emessa_in_ddt == 'F') & (db.righe_in_ordine_cliente.codice_articolo == db.anagrafica_articoli.codice_articolo)).select(orderby = db.righe_in_ordine_cliente.evasione)
    # fetch all orders once; looked up per row by the helper above
    dati_clienti = db(db.ordine_cliente).select()
    iterazione=0
    for row in rows:
        iterazione+=1
        dettaglio_cliente = ritorna_dettaglio_cliente(row.righe_in_ordine_cliente.id_ordine_cliente,dati_clienti)
        if dettaglio_cliente is not None:
            # residual quantity = ordered minus already recorded production
            quantita_prodotta_fino_ad_ora = 0
            q = db(db.produzione_righe_per_ddt.id_riga_ordine == row.righe_in_ordine_cliente.id).select().first()
            if q is not None:
                quantita_prodotta_fino_ad_ora = int(q.quantita_prodotta)
                quantita_da_produrre = int(row.righe_in_ordine_cliente.quantita) - quantita_prodotta_fino_ad_ora
            else:
                quantita_da_produrre = row.righe_in_ordine_cliente.quantita
            row.quantita_da_produrre = quantita_da_produrre
            try:
                # residual value formatted as an Italian-locale EUR amount
                prezzo = float(quantita_da_produrre) * float(row.righe_in_ordine_cliente.prezzo)
                prezzo = Money(str(prezzo),"EUR")
                prezzo = prezzo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
                """
                prezzo=0
                """
            except:
                prezzo="Null"
            if "commento" not in row.righe_in_ordine_cliente.codice_articolo:
                if quantita_da_produrre > 0:
                    # no-op expression statement, kept as-is
                    dettaglio_cliente
                    db.articoli_in_produzione.insert(data_consegna = row.righe_in_ordine_cliente.evasione,cliente= dettaglio_cliente.nome_cliente,riferimento_ordine=dettaglio_cliente.riferimento_ordine_cliente,codice_ordine = dettaglio_cliente.ultimo_codice_ordine,codice_articolo = row.anagrafica_articoli.codice_articolo,descrizione = row.anagrafica_articoli.descrizione,qta_ordine = row.righe_in_ordine_cliente.quantita,qta_saldo = quantita_da_produrre,prezzo=prezzo,id_riga=str(row.righe_in_ordine_cliente.id))
    return locals()
def scadenziario():
    """View entry point for the schedule page; exposes current_month=1 to
    the view via locals() (data is loaded separately via return_scadenziario)."""
    current_month = 1
    return locals()
def gestione_numero_fattura():
    """Edit-only grid for the invoice-number counter table (db.fattura)."""
    form = SQLFORM.grid(db.fattura,csv=False,create=False,editable=True,searchable=False)
    return locals()
def gestione_numero_ddt():
    """Edit-only grid for the DDT-number counter table (db.ddt)."""
    form = SQLFORM.grid(db.ddt,csv=False,create=False,editable=True,searchable=False,deletable=False)
    return locals()
def ritorna_numero_ddt_da_ddt_id(id):
    """Return the ddt_da_fatturare rows matching *id* (a DAL Rows object)."""
    return db(db.ddt_da_fatturare.id == id).select()
def ddt_da_fatturare():
    """Read-only grid of DDTs awaiting invoicing for the current user."""
    db.ddt_da_fatturare.user_id.default = auth.user_id
    fields = [db.ddt_da_fatturare.numero_ddt,db.ddt_da_fatturare.data_emissione,db.ddt_da_fatturare.totale]
    form = SQLFORM.grid(db.ddt_da_fatturare,fields=fields,csv=False,create=False,editable=False,searchable=False)
    return locals()
def righe_in_ddt_cliente():
    """Grid over the current user's customer-DDT working rows.

    In edit mode ('edit' in request.args) only the quantity/price columns
    are shown; quantita_richiesta is always read-only.
    """
    db.righe_in_ddt_cliente.user_id.default = auth.user_id
    db.righe_in_ddt_cliente.quantita_richiesta.writable=False
    db.righe_in_ddt_cliente.quantita_richiesta.readonly=True
    if len(request.args) > 1 and ('edit' in request.args):
        fields = [db.righe_in_ddt_cliente.quantita_richiesta,db.righe_in_ddt_cliente.quantita_prodotta,db.righe_in_ddt_cliente.prezzo]
        form = SQLFORM.grid(db.righe_in_ddt_cliente,fields=fields,csv=False,user_signature=True,args=request.args[:1])
    else:
        fields = [db.righe_in_ddt_cliente.codice_ordine,db.righe_in_ddt_cliente.codice_articolo,db.righe_in_ddt_cliente.n_riga,db.righe_in_ddt_cliente.riferimento_ordine,db.righe_in_ddt_cliente.quantita_richiesta,db.righe_in_ddt_cliente.saldo,db.righe_in_ddt_cliente.quantita_prodotta,db.righe_in_ddt_cliente.prezzo,db.righe_in_ddt_cliente.evasione]
        form = SQLFORM.grid(db.righe_in_ddt_cliente.user_id==auth.user_id,fields=fields,csv=False)
    return locals()
def righe_in_ddt_cliente_mod():
    """Grid over the user's customer-DDT working rows, "modify" page.

    NOTE(review): currently byte-for-byte identical to righe_in_ddt_cliente;
    kept separate presumably because each action needs its own view.
    """
    db.righe_in_ddt_cliente.user_id.default = auth.user_id
    db.righe_in_ddt_cliente.quantita_richiesta.writable=False
    db.righe_in_ddt_cliente.quantita_richiesta.readonly=True
    if len(request.args) > 1 and ('edit' in request.args):
        fields = [db.righe_in_ddt_cliente.quantita_richiesta,db.righe_in_ddt_cliente.quantita_prodotta,db.righe_in_ddt_cliente.prezzo]
        form = SQLFORM.grid(db.righe_in_ddt_cliente,fields=fields,csv=False,user_signature=True,args=request.args[:1])
    else:
        fields = [db.righe_in_ddt_cliente.codice_ordine,db.righe_in_ddt_cliente.codice_articolo,db.righe_in_ddt_cliente.n_riga,db.righe_in_ddt_cliente.riferimento_ordine,db.righe_in_ddt_cliente.quantita_richiesta,db.righe_in_ddt_cliente.saldo,db.righe_in_ddt_cliente.quantita_prodotta,db.righe_in_ddt_cliente.prezzo,db.righe_in_ddt_cliente.evasione]
        form = SQLFORM.grid(db.righe_in_ddt_cliente.user_id==auth.user_id,fields=fields,csv=False)
    return locals()
def righe_in_ddt_fornitore():
    """Grid over the current user's supplier-DDT scratch rows."""
    table = db.righe_in_ddt_fornitore
    table.user_id.default = auth.user_id
    fields = [table.codice_ordine, table.codice_articolo, table.n_riga,
              table.riferimento_ordine, table.u_m, table.quantita,
              table.prezzo, table.sconti, table.codice_iva, table.evasione]
    form = SQLFORM.grid(table.user_id == auth.user_id, fields=fields, csv=False)
    return locals()
def aspetto_esteriore_dei_beni():
    """CRUD grid for the 'external appearance of goods' lookup table."""
    return dict(form=SQLFORM.grid(db.aspetto_esteriore_dei_beni))
def causali():
    """CRUD grid for the transport-reason ('causali') lookup table."""
    return dict(form=SQLFORM.grid(db.causali))
def porto():
    """CRUD grid for the carriage-terms ('porto') lookup table."""
    return dict(form=SQLFORM.grid(db.porto))
def modifica_ddt():
    """Load a previously saved customer DDT back into the editable scratch table.

    Expects ``request.vars.a`` = saved DDT id and ``request.vars.b`` =
    customer id.  Rebuilds ``righe_in_ddt_cliente`` from
    ``saved_righe_in_ddt_cliente``, prepares the open-orders grid and the
    lookup sets (transport, appearance, reasons, carriage) consumed by the
    view.  Any failure only sets ``errore`` — the view decides how to react.
    """
    errore = False
    try:
        ddt_id = request.vars.a
        id_cliente = request.vars.b
        # print "DDT ID : "+ddt_id
        nome_cliente = db(db.clienti.id==id_cliente).select().first()["nome"]
        # Drop the current user's scratch rows before repopulating them.
        db(db.righe_in_ddt_cliente.user_id==auth.user_id).delete()
        d = db(db.saved_ddt.saved_ddt_id == ddt_id).select().first()
        numero_ddt_corrente = numero_ddt=d["numero_ddt"]
        data_ddt=datetime.datetime.strptime(d["data_inserimento"][0:10],"%Y-%m-%d").date()
        data_ddt=data_ddt.strftime("%d/%m/%Y")
        righe_form="ok"
        # NOTE(review): this deletes ALL righe_in_ddt_cliente rows (every
        # user), not just the current user's — confirm this is intended.
        db(db.righe_in_ddt_cliente).delete()
        # print "SONO QUI"
        query=db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id).select()
        for r in query:
            if "commento" in r.codice_articolo:
                # Comment rows carry no quantities; these locals are not
                # read again afterwards.
                quantita_da_produrre = prenotato = quantita_prodotta = saldo = 0
                pass
            elif "commento" not in r.codice_articolo:
                # print "prima"
                """
                Look up the currently produced quantity saved in the
                "produzione_righe_per_ddt" table.
                """
                # print "RIGA VEFIASDFA"
                # print "ciao"
                if r.id_riga_ordine is None or len(r.id_riga_ordine)<1:
                    # Saved row has no order-row reference: resolve it from
                    # order id + row number.
                    # print "riciao"
                    id_riga_ordine=db((db.righe_in_ordine_cliente.id_ordine_cliente == r.id_ordine) & (db.righe_in_ordine_cliente.n_riga ==r.n_riga)).select().first()
                    if id_riga_ordine is not None:
                        id_riga_ordine = id_riga_ordine["id"]
                    else:
                        # The referenced order row no longer exists.
                        errore = True
                        # print r
                        msg = "La riga {0} dell'ordine {1} è stata cancellata dalle righe dell'ordine".format(r.n_riga,r.id_ordine)
                        response.flash=msg
                else:
                    # print "provo"
                    id_riga_ordine = r.id_riga_ordine
                # print "ID RIGA ORDINE ",id_riga_ordine
                row_id = r.id
                # NOTE(review): the first lookup below is immediately
                # overwritten by the second — looks like leftover code.
                dettagli_produzione_riga = db(db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine).select().first()
                dettagli_produzione_riga = db((db.saved_righe_in_ddt_cliente.id_riga_ordine == id_riga_ordine) & (db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id)).select().first()
                # print dettagli_produzione_riga
                if dettagli_produzione_riga is not None:
                    # print "Riga trovata"
                    """
                    The row was found, meaning a balance quantity was
                    entered.  Recover the produced quantity.
                    """
                    # quantita_da_produrre= int(ritorna_quantita_richiesta_da_riga_salvata(id_riga_ordine)) - int(dettagli_produzione_riga.quantita_prodotta)
                    # quantita_da_produrre= int(dettagli_produzione_riga.quantita_prodotta)
                    quantita_da_produrre= int(dettagli_produzione_riga.quantita)
                    # print "quantita da produrre ",quantita_da_produrre
                    quantita_prodotta = dettagli_produzione_riga.quantita
                else:
                    # print "Riga non trovata"
                    """
                    Default the produced quantity to zero to speed up data
                    entry (row.quantita is the quantity originally ordered).
                    """
                    # quantita_da_produrre = r.quantita
                    quantita_da_produrre = 0
                    quantita_prodotta = 0
                # Recreate the editable scratch row for the current user.
                db.righe_in_ddt_cliente.insert(saldo=ritorna_quantita_saldo(id_riga_ordine),user_id = auth.user_id,codice_articolo = r.codice_articolo,descrizione=r.descrizione,riferimento_ordine=r.riferimento_ordine,u_m=r.u_m,prezzo=r.prezzo,sconti=r.sconti,codice_iva=r.codice_iva,n_riga=r.n_riga,evasione=r.evasione,id_ordine=r.id_ordine,codice_ordine=r.codice_ordine,quantita_richiesta=ritorna_quantita_richiesta_da_riga_salvata(id_riga_ordine),quantita_prodotta=quantita_da_produrre,id_riga_ordine=r.id_riga_ordine)
        # print "SONO QUIkk"
        # print ddt_id
        ordine_id = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id).select().first()["id_ordine"]
        # print "SONO QUI2"
        # Defaults for adding new rows to the underlying order.
        numero_riga_corrente = db(db.righe_in_ordine_cliente.id_ordine_cliente==ordine_id).count()+1
        db.righe_in_ordine_cliente.n_riga.default = numero_riga_corrente
        db.righe_in_ordine_cliente.n_riga.writable = False
        db.righe_in_ordine_cliente.id_ordine_cliente.default = ordine_id
        db.righe_in_ordine_cliente.id_ordine_cliente.writable = False
        db.righe_in_ordine_cliente.prezzo.default = 0
        # db.righe_in_ordine_cliente.prezzo.writable = False
        # fields=['']
        cliente = db(db.clienti.id == id_cliente).select().first()
        db.righe_in_ordine_cliente.codice_iva.default=cliente.codice_iva
        ddt_id2 = db(db.ddt_cliente.id == ddt_id).select()
        links=[lambda row: BUTTON("Aggiungi righe",_onclick=XML('aggiungiRigheMod('+str(row.id)+')'),_class='button btn btn-default')]
        fields=[db.ordine_cliente.ultimo_codice_ordine,db.ordine_cliente.riferimento_ordine_cliente,db.ordine_cliente.data_ordine_cliente]
        query=((db.ordine_cliente.id_cliente== id_cliente) & (db.ordine_cliente.ddt_completato =='F'))
        # query=(db.ordine_cliente.ddt_completato == '0')
        righe_in_ordine_cliente_form = SQLFORM.grid(query=query,formname='ordini_clienti_ddt',maxtextlength=100,create=False,editable=True, deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=False,links=links,user_signature=True,args=request.args[:1],fields=fields)
        luoghi = []
        row = db(db.clienti.id == id_cliente).select().first()
        error = False
        if row.citta is None:
            response.flash="Il cliente non ha la città in anagrafica\nAggiornare l'anagrafica per poter emettere il DDT"
            error=True
        try:
            # Collect the non-empty delivery addresses.  A None column makes
            # len() raise TypeError; the except installs the placeholder.
            if len(row.luogo_consegna_1) > 0:
                luoghi.append(row.luogo_consegna_1)
            if len(row.luogo_consegna_2) > 0:
                luoghi.append(row.luogo_consegna_2)
            if len(row.luogo_consegna_3) > 0:
                luoghi.append(row.luogo_consegna_3)
            if len(row.luogo_consegna_4) > 0:
                luoghi.append(row.luogo_consegna_4)
            if len(row.luogo_consegna_5) > 0:
                luoghi.append(row.luogo_consegna_5)
            if len(row.luogo_consegna_6) > 0:
                luoghi.append(row.luogo_consegna_6)
        except:
            luoghi.append("Cliente,,,,,,")
        # Lookup sets rendered as dropdowns by the view.
        trasporto_a_mezzo = Set()
        trasporto_a_mezzo.add("Mittente")
        trasporto_a_mezzo.add("Destinatario")
        trasporto_a_mezzo.add("Vettore")
        aspetto_esteriore_dei_beni = Set()
        rows = db(db.aspetto_esteriore_dei_beni).select()
        for row in rows:
            aspetto_esteriore_dei_beni.add(row.nome)
        causali = Set()
        rows = db(db.causali).select()
        for row in rows:
            causali.add(row.nome)
        porto = Set()
        rows = db(db.porto).select()
        for row in rows:
            porto.add(row.nome)
    except Exception, e:
        # print e
        # Best-effort: any failure is reported to the view via "errore".
        errore=True;
    return locals()
def fatturazione_istantanea_2():
    """Instant-invoice row editor for the customer in ``request.args[0]``."""
    id_cliente = request.args[0]
    nome_cliente = db(db.clienti.id == id_cliente).select().first()["nome"]
    form_righe = form = SQLFORM.grid(
        db.righe_in_fattura_istantanea, formname='mod', maxtextlength=100,
        create=True, editable=True, deletable=True, searchable=True,
        sortable=True, paginate=5, formstyle='table3cols', csv=False,
        user_signature=True, args=request.args[:1])
    # True when the action was entered to create a fresh invoice.
    new_order = 'new' in request.args
    return locals()
def nota_di_accredito_2():
    """Credit-note row editor for the customer in ``request.args[0]``.

    Customers whose name contains "leonardo" also get their 'enti' list.
    """
    id_cliente = request.args[0]
    nome_cliente = db(db.clienti.id == id_cliente).select().first()["nome"]
    if "leonardo" in nome_cliente.lower():
        enti = db(db.enti_leonardo).select()
    else:
        enti = ""
    form_righe = form = SQLFORM.grid(
        db.righe_in_fattura_istantanea, formname='mod', maxtextlength=100,
        create=True, editable=True, deletable=False, searchable=True,
        sortable=True, paginate=5, formstyle='table3cols', csv=False,
        user_signature=True, args=request.args[:1])
    # True when the action was entered to create a fresh credit note.
    new_order = 'new' in request.args
    return locals()
def mod_ddt_clienti_2():
    """List a customer's issued DDTs with a per-row 'Modifica' link."""
    id_cliente = request.args[0]
    nome_cliente = db(db.clienti.id == id_cliente).select().first()["nome"]
    # Clear the current user's scratch rows before an edit session starts.
    db(db.righe_in_ddt_cliente.user_id == auth.user_id).delete()
    fields = [db.ddt_cliente.numero_ddt, db.ddt_cliente.data_richiesta]
    # Only DDTs that actually received a number.
    query = ((db.ddt_cliente.id_cliente == id_cliente) &
             (db.ddt_cliente.numero_ddt != "None"))
    links = [lambda row: A("Modifica",
                           _href=URL('modifica_ddt', vars=dict(a=row.id, b=id_cliente)),
                           _class='button btn btn-default')]
    form = SQLFORM.grid(query=query, formname='mod', maxtextlength=100,
                        create=False, editable=True, deletable=False,
                        searchable=True, sortable=True, paginate=5,
                        formstyle='table3cols', csv=False, fields=fields,
                        user_signature=True, args=request.args[:1], links=links)
    # form ="hello"
    return locals()
def mod_ddt_clienti_3():
    """Prepare the context to modify a customer DDT (``request.args[0]`` id).

    Computes the next progressive DDT number from the "n/yy" counter,
    collects the customer's delivery addresses and lookup sets, and builds
    the grid of the customer's open orders.

    Improvement over the original: the six copy-pasted ``luogo_consegna_N``
    checks are collapsed into a loop with identical semantics (a None
    column raises TypeError in len(), the except installs the placeholder).
    """
    id_ddt = request.args[0]
    # db(db.righe_in_ddt_cliente.user_id == auth.user_id).delete()
    ddt_id = db(db.ddt_cliente.id == id_ddt).select().first()
    id_cliente = ddt_id.id_cliente
    nome_cliente = ddt_id.nome_cliente
    # Next progressive number: bump the "n" part of the saved "n/yy" counter.
    numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
    n = numero_ddt_salvato.split("/")[0]
    a = numero_ddt_salvato.split("/")[1]
    new_n = str(int(n) + 1)
    numero_ddt_corrente = new_n + "/" + a
    luoghi = []
    row = db(db.clienti.id == id_cliente).select().first()
    error = False
    if row.citta is None:
        response.flash = "Il cliente non ha la città in anagrafica\nAggiornare l'anagrafica per poter emettere il DDT"
        error = True
    try:
        # Collect the non-empty delivery addresses (1..6).
        for i in range(1, 7):
            luogo = row['luogo_consegna_%d' % i]
            if len(luogo) > 0:
                luoghi.append(luogo)
    except:
        # A None address column ends the scan; fall back to a placeholder.
        luoghi.append("Cliente,,,,,,")
    # Lookup sets rendered as dropdowns by the view.
    trasporto_a_mezzo = Set()
    trasporto_a_mezzo.add("Mittente")
    trasporto_a_mezzo.add("Destinatario")
    trasporto_a_mezzo.add("Vettore")
    aspetto_esteriore_dei_beni = Set()
    rows = db(db.aspetto_esteriore_dei_beni).select()
    for row in rows:
        aspetto_esteriore_dei_beni.add(row.nome)
    causali = Set()
    rows = db(db.causali).select()
    for row in rows:
        causali.add(row.nome)
    porto = Set()
    rows = db(db.porto).select()
    for row in rows:
        porto.add(row.nome)
    ddt_id2 = db(db.ddt_cliente.id == id_ddt).select()
    links = [lambda row: BUTTON("Aggiungi righe", _onclick=XML('aggiungiRighe(' + str(row.id) + ')'), _class='button btn btn-default')]
    fields = [db.ordine_cliente.ultimo_codice_ordine, db.ordine_cliente.riferimento_ordine_cliente, db.ordine_cliente.data_ordine_cliente]
    # Only orders not yet fully delivered.
    query = ((db.ordine_cliente.id_cliente == id_cliente) & (db.ordine_cliente.ddt_completato == 'F'))
    righe_in_ordine_cliente_form = SQLFORM.grid(query=query, formname='ordini_clienti_ddt', maxtextlength=100, create=False, editable=True, deletable=False, searchable=True, sortable=True, paginate=5, formstyle='table3cols', csv=False, links=links, user_signature=True, args=request.args[:1], fields=fields)
    return locals()
def ddt_clienti_2():
    """Prepare the context to issue a new customer DDT (``request.args[0]`` id).

    Computes the next progressive DDT number, collects the customer's
    delivery addresses, reads the customer's transport defaults, builds the
    lookup sets and the grid of the customer's open orders.

    Improvement over the original: the six copy-pasted ``luogo_consegna_N``
    checks are collapsed into a loop with identical semantics (a None
    column raises TypeError in len(), the except installs the placeholder).
    """
    id_ddt = request.args[0]
    # db(db.righe_in_ddt_cliente.user_id == auth.user_id).delete()
    ddt_id = db(db.ddt_cliente.id == id_ddt).select().first()
    id_cliente = ddt_id.id_cliente
    nome_cliente = ddt_id.nome_cliente
    # Next progressive number: bump the "n" part of the saved "n/yy" counter.
    numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
    n = numero_ddt_salvato.split("/")[0]
    a = numero_ddt_salvato.split("/")[1]
    new_n = str(int(n) + 1)
    numero_ddt_corrente = new_n + "/" + a
    luoghi = []
    row = db(db.clienti.id == id_cliente).select().first()
    error = False
    if row.citta is None:
        response.flash = "Il cliente non ha la città in anagrafica\nAggiornare l'anagrafica per poter emettere il DDT"
        error = True
    try:
        # Collect the non-empty delivery addresses (1..6).
        for i in range(1, 7):
            luogo = row['luogo_consegna_%d' % i]
            if len(luogo) > 0:
                luoghi.append(luogo)
    except:
        # A None address column ends the scan; fall back to a placeholder.
        luoghi.append("Cliente,,,,,,")
    # Transport defaults taken from the customer's registry record.
    selected_trasporto = row.trasporto_a_mezzo
    selected_causale = row.causale_trasporto
    selected_porto = row.porto
    selected_vettore = row.vettore
    # Lookup sets rendered as dropdowns by the view.
    trasporto_a_mezzo = Set()
    trasporto_a_mezzo.add("Mittente")
    trasporto_a_mezzo.add("Destinatario")
    trasporto_a_mezzo.add("Vettore")
    aspetto_esteriore_dei_beni = Set()
    rows = db(db.aspetto_esteriore_dei_beni).select()
    for row in rows:
        aspetto_esteriore_dei_beni.add(row.nome)
    causali = Set()
    rows = db(db.causali).select()
    for row in rows:
        causali.add(row.nome)
    porto = Set()
    rows = db(db.porto).select()
    for row in rows:
        porto.add(row.nome)
    ddt_id2 = db(db.ddt_cliente.id == id_ddt).select()
    links = [lambda row: BUTTON("Aggiungi righe", _onclick=XML('aggiungiRighe(' + str(row.id) + ')'), _class='button btn btn-default')]
    fields = [db.ordine_cliente.ultimo_codice_ordine, db.ordine_cliente.riferimento_ordine_cliente, db.ordine_cliente.data_ordine_cliente]
    # Only orders not yet fully delivered.
    query = ((db.ordine_cliente.id_cliente == id_cliente) & (db.ordine_cliente.ddt_completato == 'F'))
    righe_in_ordine_cliente_form = SQLFORM.grid(query=query, formname='ordini_clienti_ddt', maxtextlength=100, create=False, editable=True, deletable=False, searchable=True, sortable=True, paginate=5, formstyle='table3cols', csv=False, links=links, user_signature=True, args=request.args[:1], fields=fields)
    return locals()
def ddt_fornitori_2():
    """Prepare the context to issue a DDT toward a supplier.

    ``request.args[0]`` is the ddt_fornitore id.  Clears the user's scratch
    rows, computes the next progressive DDT number (seeding the counter the
    first time), collects the supplier's delivery addresses and lookup
    sets, and builds the grid of the supplier's open orders.

    BUG FIX: the original address checks read ``len(x) is not Null`` —
    ``Null`` is an undefined name in Python, so a NameError was always
    raised and only the placeholder address was ever used.  The test is now
    the same ``len(x) > 0`` used by the customer-side controllers.
    """
    id_ddt = request.args[0]
    db(db.righe_in_ddt_fornitore.user_id == auth.user_id).delete()
    ddt_id = db(db.ddt_fornitore.id == id_ddt).select().first()
    id_fornitore = ddt_id.id_fornitore
    nome_fornitore = ddt_id.nome_fornitore
    try:
        # Next progressive number: bump the "n" part of the "n/yy" counter.
        numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
        n = numero_ddt_salvato.split("/")[0]
        a = numero_ddt_salvato.split("/")[1]
        new_n = str(int(n) + 1)
        numero_ddt_corrente = new_n + "/" + a
    except:
        # No counter row yet: seed it and start from 1/17.
        db.ddt.insert(numero_ddt="0/17")
        numero_ddt_corrente = "1/17"
    row = db(db.fornitori.id == id_fornitore).select().first()
    error = False
    if row.citta is None:
        response.flash = "Il fornitore non ha la città in anagrafica\nAggiornare l'anagrafica per poter emettere il DDT"
        error = True
    luoghi = []
    try:
        # Collect the non-empty delivery addresses; a None column raises
        # TypeError in len() and the except installs the placeholder.
        if len(row.luogo_consegna_1) > 0:
            luoghi.append(row.luogo_consegna_1)
        if len(row.luogo_consegna_2) > 0:
            luoghi.append(row.luogo_consegna_2)
    except:
        luoghi.append("Indirizzo fornitore,,,,,")
    # Lookup sets rendered as dropdowns by the view.
    trasporto_a_mezzo = Set()
    trasporto_a_mezzo.add("Mittente")
    trasporto_a_mezzo.add("Destinatario")
    trasporto_a_mezzo.add("Vettore")
    aspetto_esteriore_dei_beni = Set()
    rows = db(db.aspetto_esteriore_dei_beni).select()
    for row in rows:
        aspetto_esteriore_dei_beni.add(row.nome)
    causali = Set()
    rows = db(db.causali).select()
    for row in rows:
        causali.add(row.nome)
    porto = Set()
    rows = db(db.porto).select()
    for row in rows:
        porto.add(row.nome)
    ddt_id2 = db(db.ddt_cliente.id == id_ddt).select()
    links = [lambda row: BUTTON("Aggiungi righe", _onclick=XML('aggiungiRigheFornitore(' + str(row.id) + ')'), _class='button btn btn-default')]
    fields = [db.ordine_fornitore.ultimo_codice_ordine, db.ordine_fornitore.riferimento_ordine_cliente, db.ordine_fornitore.data_ordine_fornitore]
    # Only orders not yet fully delivered.
    query = ((db.ordine_fornitore.id_fornitore == id_fornitore) & (db.ordine_fornitore.ddt_completato == 'F'))
    righe_in_ordine_fornitore_form = SQLFORM.grid(query=query, formname='ordini_fornitorii_ddt', maxtextlength=100, create=False, editable=True, deletable=False, searchable=True, sortable=True, paginate=5, formstyle='table3cols', csv=False, links=links, user_signature=True, args=request.args[:1], fields=fields)
    return locals()
def crea_riba():
    """Stub action for Ri.Ba. creation; currently only exposes current_month."""
    return dict(current_month=1)
@service.jsonrpc
@service.jsonrpc2
def ristampa_fattura_da_id(args):
    """Re-print a previously saved deferred invoice as a PDF.

    JSON-RPC entry point; ``args['0']`` is the ``fatture_salvate`` id.
    Rebuilds the FATTURA document from the saved DDT rows and calls
    ``create_pdf()``.  On a payment-configuration error it flashes a
    message and returns ``locals()`` early.
    """
    id_fattura=args['0']
    dati_fattura = db(db.fatture_salvate.id == id_fattura).select().first()
    # print dati_fattura
    id_cliente = dati_fattura.id_cliente
    ddts_id = dati_fattura.id_ddt
    # response.flash = ddts_id
    numero_fattura_da_salvare = dati_fattura.numero_fattura
    """
    Customer data
    """
    dati_cliente = db(db.clienti.id == id_cliente).select().first()
    nome_cliente=dati_cliente.nome
    citta_cliente = dati_cliente.citta
    indirizzo_cliente = dati_cliente.indirizzo
    cap_cliente = dati_cliente.cap
    provincia_cliente = dati_cliente.provincia
    cf_cliente = dati_cliente.codice_fiscale
    pi_cliente = dati_cliente.partita_iva
    nazione_cliente = dati_cliente.nazione
    codice_banca = dati_cliente.codice_banca
    dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
    fattura = FATTURA("FATTURA DIFFERITA",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare)
    fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
    # Placeholder header; overwritten below once payment terms are known.
    fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADEMZA")
    # HACK: id_ddt stores the textual repr of a Python list; eval() turns it
    # back into a list.  The data comes from our own DB, but eval on stored
    # text is fragile/unsafe — consider storing JSON instead.
    ddts_id = eval(ddts_id)
    fattura.rows=[]
    # Maps VAT code -> accumulated net amount, for the footer breakdown.
    lista_codici_iva = {}
    importo_totale = 0
    imposta_totale = 0
    lista_ddt = []
    for ddt_id in ddts_id:
        lista_ddt.append(ddt_id)
        rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id).select()
        # print "DDT ID : ",ddt_id
        for row in rows:
            id_ordine = row.id_ordine
            try:
                # Payment terms: from the order when set, else from the customer.
                pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
                # print "pagamento = ",pagamento
                if pagamento is None:
                    pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
                giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                scadenza = scadenza.strftime("%d/%m/%Y")
                fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
            except:
                # Missing/unknown payment configuration: abort with a flash.
                response.flash="Controllare il tipo di pagamento in anagrafica"
                return locals()
            # print "Aggiunta rig"
            sconti = row.sconti
            if row.sconti is None:
                sconti=""
            importo = saved_importo = float(row.quantita) * float(row.prezzo)
            importo = Money(str(importo),"EUR")
            importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
            prezzo = str(row.prezzo).replace(".",",")
            codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
            percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
            importo_totale +=saved_importo
            imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
            # Group net amounts by VAT code for the footer breakdown.
            if not codice_iva in lista_codici_iva:
                lista_codici_iva[codice_iva] = saved_importo
            else:
                lista_codici_iva[codice_iva] += saved_importo
            fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.quantita,prezzo,sconti,importo,codice_iva)
    # print lista_codici_iva
    # Per-VAT-code footer lines; stamp duty ("bollo") applied once when any
    # code is exempt.
    bollo_presente = False
    bollo = 0
    for k,v in lista_codici_iva.iteritems():
        codice_iva = k
        importo_netto = v
        # print "LISTA CODICI : ",codice_iva,importo_netto
        dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
        percentuale_iva = dettaglio_iva.percentuale_iva
        descrizione_iva = dettaglio_iva.descrizione_codice_iva
        imposta_iva = return_imposta(v,percentuale_iva)
        if dettaglio_iva.bollo_su_importi_esenti is True:
            if not bollo_presente:
                bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
                bollo_presente = True
        fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
        bollo = 0
    if bollo_presente:
        bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
        importo_totale += float(bollo)
    # NOTE(review): this adds imposta_iva (the tax of the LAST VAT code
    # iterated only), not imposta_totale — looks like a bug; confirm before
    # changing.
    importo_totale_da_salvare = importo_totale +imposta_iva
    importo_totale = Money(str(importo_totale),"EUR")
    importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
    fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
    fattura.totale(str(importo_totale_da_salvare))
    # db.fatture_salvate.insert(nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
    fattura.insert_rows()
    fattura.create_pdf()
def ristampa_fattura():
    """Read-only grid of saved invoices with a per-row 'Ristampa' button."""
    links = [lambda row: BUTTON("Ristampa",
                                _onclick=XML('ristampaFattura(' + str(row.id) + ')'),
                                _class='button btn btn-default')]
    fields = [db.fatture_salvate.data_fattura, db.fatture_salvate.numero_fattura,
              db.fatture_salvate.nome_cliente, db.fatture_salvate.totale]
    fatture_da_ristampare = SQLFORM.grid(
        db.fatture_salvate, formname='fatture_salvate', maxtextlength=100,
        create=False, editable=False, deletable=False, searchable=True,
        sortable=True, paginate=5, formstyle='table3cols', csv=False,
        links=links, fields=fields)
    return locals()
def fatturazione_differita_2():
    """Second step of deferred invoicing: pick the customer's DDTs to invoice.

    ``request.args[0]`` is the fattura_cliente id created in the first step.
    Shows a grid of the customer's un-invoiced, numbered DDTs in the chosen
    date range, each with an 'Aggiungi DDT' button and a virtual total.

    Fix over the original: removed a leftover debug ``print`` statement.
    """
    id_fattura = request.args[0]
    fattura = db(db.fattura_cliente.id == id_fattura).select().first()
    id_cliente = fattura.id_cliente
    dal = fattura.dal
    al_fixed = fattura.al
    # Upper bound widened by two days — legacy inclusive-range fudge,
    # presumably to catch boundary dates; TODO confirm.
    al = fattura.al + datetime.timedelta(days=2)
    nome_cliente = fattura.nome_cliente
    """
    Select all ddts of the selected client.
    """
    # Un-invoiced DDTs of this customer, in range, that received a number.
    ddts_id = ((db.ddt_cliente.id_cliente == id_cliente) & (db.ddt_cliente.data_richiesta >= fattura.dal) & (db.ddt_cliente.data_richiesta <= al) & (db.ddt_cliente.fattura_emessa == 'F') & (db.ddt_cliente.numero_ddt != 'None'))
    links = [lambda row: BUTTON("Aggiungi DDT", _onclick=XML('aggiungiDDT(' + str(row.id) + ')'), _class='button btn btn-default')]
    # Virtual column: VAT-inclusive total computed per DDT on the fly.
    db.ddt_cliente.totale = Field.Virtual("Totale", lambda row: calcola_totale_iva_inclusa_da_ddt(row.ddt_cliente.id))
    fields = [db.ddt_cliente.data_richiesta, db.ddt_cliente.numero_ddt, db.ddt_cliente.totale]
    ddt_da_fatturare = SQLFORM.grid(query=ddts_id, formname='ordini_clienti_ddt', maxtextlength=100, create=False, editable=False, deletable=True, searchable=True, sortable=True, paginate=5, formstyle='table3cols', csv=False, links=links, user_signature=True, args=request.args[:1], fields=fields)
    return locals()
def calcola_totale_per_mese_da_ddt_cliente():
    """Page shell for the monthly totals report; data comes from *_data."""
    return dict(current_month=1)
def calcola_totale_per_anno():
    """Page shell for the yearly totals report; data comes from *_data."""
    return dict(current_year=2018)
def calcola_totale_per_anno_data():
    """Per-customer DDT totals (VAT excluded) for one year, as JSON + grid.

    Year comes from ``request.vars['y']`` (defaults to the current year).
    Rebuilds the ``totali_ddt_mese_`` report table from scratch and returns
    the chart data (JSON list), the grid and the grand total.

    Fixes over the original: removed the unreachable ``return locals()``
    that followed the ``return dict(...)``, and a stray semicolon.
    """
    lista = [["Cliente", "Totale"]]
    try:
        year = int(request.vars['y'])
    except:
        # Missing/invalid ?y= parameter: default to the current year.
        year = datetime.datetime.now().year
    start_date = datetime.datetime(year, 1, 1)
    end_date = datetime.datetime(year, 12, 31).date()
    rows1 = db(db.clienti).select()
    # Rebuild the report table on every request.
    db(db.totali_ddt_mese_).delete()
    db.totali_ddt_mese_.id.readable = False
    totalissimo = 0
    nome_cliente = ""
    for r1 in rows1:
        try:
            totale = 0
            ddts = db((db.ddt_cliente.id_cliente == r1.id) &
                      (db.ddt_cliente.data_richiesta >= start_date) &
                      (db.ddt_cliente.data_richiesta <= end_date) &
                      (db.ddt_cliente.numero_ddt != 'None')).select()
            for ddt in ddts:
                nome_cliente = ddt.nome_cliente
                totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
            if totale > 0:
                lista.append([nome_cliente, totale])
                db.totali_ddt_mese_.insert(cliente=nome_cliente,
                                           totale=ritorna_prezzo_europeo(totale))
                totalissimo += totale
        except Exception as e:
            # Best-effort: a customer whose totals cannot be computed is
            # skipped (legacy behaviour).
            pass
    form = SQLFORM.grid(db.totali_ddt_mese_, deletable=False, searchable=True,
                        sortable=True, paginate=5, formstyle='table3cols',
                        csv=True, user_signature=True, args=request.args[:1])
    return dict(lista=json.dumps(lista), form=form,
                totalissimo=ritorna_prezzo_europeo(totalissimo))
def calcola_totale_per_anno_leonardo():
    """Page shell for the Leonardo yearly report; data comes from *_data."""
    return dict(current_year=1)
def calcola_totale_per_anno_leonardo_data():
    """Yearly DDT totals for the Leonardo customer, broken down by delivery site.

    Year comes from ``request.vars['y']`` (defaults to the current year).
    For customer id 41, DDT totals (VAT excluded) are grouped by the
    delivery-address substring and written into ``totali_ddt_anno_``;
    returns the chart data, the grid and the grand total.

    Improvement over the original: the five near-identical copy-pasted
    destination blocks are replaced by one data-driven loop over
    (substring, label) pairs — same selects, same inserts, same totals.
    """
    lista = [["Cliente", "Totale"]]
    form = ""
    try:
        year = int(request.vars['y'])
    except:
        # Missing/invalid ?y= parameter: default to the current year.
        year = datetime.datetime.now().year
    day_start = 1
    st = str(day_start) + "/" + str(1) + "/" + str(year)
    start_date = datetime.datetime(year, 1, day_start)
    # One extra day so DDTs dated Dec 31 are included.
    end_date = datetime.datetime(year, 12, 31).date() + timedelta(days=1)
    rows1 = db(db.clienti.id == 41).select()
    # Rebuild the report table on every request.
    db(db.totali_ddt_anno_).delete()
    db.totali_ddt_anno_.id.readable = False
    totalissimo = 0
    nome_cliente = ""
    # (substring searched in ddt_cliente.consegna, label stored/reported)
    destinazioni = [
        ('CHIETI', 'CHIETI'),
        ('BISENZIO', 'CAMPI BISENZIO'),
        ('BAINSIZZA', 'BORGO BAINSIZZA'),
        ('NERVIANO', 'NERVIANO'),
        ('ADRIATICA', 'FOCACCIA GROUP SRL'),
    ]
    for r1 in rows1:
        try:
            for pattern, consegna in destinazioni:
                ddts = db((db.ddt_cliente.id_cliente == r1.id) &
                          (db.ddt_cliente.data_richiesta >= start_date) &
                          (db.ddt_cliente.data_richiesta <= end_date) &
                          (db.ddt_cliente.numero_ddt != 'None') &
                          (db.ddt_cliente.consegna.contains(pattern))).select()
                totale = 0
                for ddt in ddts:
                    totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
                if totale > 0:
                    lista.append([consegna, totale])
                    db.totali_ddt_anno_.insert(destinazione=consegna,
                                               totale=ritorna_prezzo_europeo(totale))
                    totalissimo += totale
        except Exception as e:
            # Best-effort: skip this customer on any error (legacy behaviour).
            pass
    form = SQLFORM.grid(db.totali_ddt_anno_, deletable=False, searchable=True,
                        sortable=True, paginate=5, formstyle='table3cols',
                        csv=True, user_signature=True, args=request.args[:1])
    return dict(lista=json.dumps(lista), form=form,
                totalissimo=ritorna_prezzo_europeo(totalissimo))
def calcola_totale_per_mese_da_ddt_cliente_data():
    """Per-customer DDT totals (VAT excluded) for one month, as JSON + grid.

    Month comes from ``request.vars['m']`` (defaults to the current month).
    Rebuilds the ``totali_ddt_mese_`` report table and returns the chart
    data (JSON list), the grid and the grand total.
    """
    lista=[]
    riga=[]
    riga.append("Cliente")
    riga.append("Totale")
    lista.append(riga)
    try:
        month = int(request.vars['m'])
    except:
        # Missing/invalid ?m= parameter: default to the current month.
        month = datetime.datetime.now().month
    day_start,day_end = monthrange(datetime.datetime.now().year, month)
    day_start = 1
    st = str(day_start)+"/"+str(month)+"/"+str(datetime.datetime.now().year)
    start_date = datetime.datetime(datetime.datetime.now().year,month,day_start)
    # One extra day so DDTs dated on the last day of the month are included.
    end_date = datetime.datetime(datetime.datetime.now().year,month,day_end).date() + timedelta(days=1)
    # print start_date,end_date
    rows1= db(db.clienti).select()
    # Rebuild the report table on every request.
    db(db.totali_ddt_mese_).delete()
    db.totali_ddt_mese_.id.readable=False;
    totalissimo=0
    nome_cliente=""
    for r1 in rows1:
        try:
            riga=[]
            totale = 0
            ddts = db((db.ddt_cliente.id_cliente == r1.id) & (db.ddt_cliente.data_richiesta >= start_date) & (db.ddt_cliente.data_richiesta <= end_date) & (db.ddt_cliente.numero_ddt != 'None')).select()
            for ddt in ddts:
                nome_cliente = ddt.nome_cliente
                # print "NOME CLIENTE = ",nome_cliente,ddt.id
                totale += ritorna_int_calcola_totale_iva_esclusa_da_ddt(ddt.id)
            if totale > 0:
                riga.append(nome_cliente)
                riga.append(totale)
                lista.append(riga)
                db.totali_ddt_mese_.insert(cliente=nome_cliente,totale=ritorna_prezzo_europeo(totale))
                totalissimo +=totale
        except Exception,e:
            # print "ECCEZZIONE ",e
            # Best-effort: skip customers whose totals cannot be computed.
            pass
    # print lista
    form = SQLFORM.grid(db.totali_ddt_mese_,deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=True,user_signature=True,args=request.args[:1])
    return dict(lista=json.dumps(lista),form=form,totalissimo = ritorna_prezzo_europeo(totalissimo))
def ritorna_prezzo_europeo(importo):
    """Format a numeric amount as Italian-locale EUR text (ASCII only)."""
    come_money = Money(str(importo), "EUR")
    testo = come_money.format("it_IT")
    # Strip non-ASCII characters (e.g. the euro sign / nbsp) for the PDF layer.
    return testo.encode('ascii', 'ignore').decode('ascii')
def ritorna_int_calcola_totale_iva_esclusa_da_ddt(id_ddt):
    """Numeric VAT-excluded total (sum of quantita*prezzo) of a saved DDT.

    Comment rows (codice_articolo containing "commento") are skipped, as is
    any row whose quantity/price cannot be parsed as floats.  Returns a
    float; 0 when the DDT has no valid rows.

    Improvement over the original: removed dead per-row work (Money
    formatting and a price string that were never used, and an
    ``imposta_totale`` accumulator that always stayed 0).
    """
    rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == id_ddt).select()
    importo_totale = 0
    for row in rows:
        if "commento" in row.codice_articolo:
            continue
        try:
            # Best-effort: rows with missing/non-numeric values are skipped,
            # matching the original behaviour.
            importo_totale += float(row.quantita) * float(row.prezzo)
        except:
            pass
    return importo_totale
def ritorna_int_calcola_totale_iva_inclusa_da_ddt(id_ddt):
    """Numeric VAT-inclusive total of a saved DDT.

    Skips comment rows and, best-effort, any row whose values fail to parse
    or whose VAT code cannot be resolved.  Returns a float; 0 for an empty
    or fully-skipped DDT.
    """
    rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == id_ddt).select()
    # print "DDT ID : ",id_ddt
    totale = 0
    importo_totale = 0
    imposta_totale = 0
    for row in rows:
        if not "commento" in row.codice_articolo:
            id_ordine = row.id_ordine
            try:
                importo = saved_importo = float(row.quantita) * float(row.prezzo)
                # NOTE(review): importo/prezzo below are formatted but never
                # used — leftover from the printing variant of this helper.
                importo = Money(str(importo),"EUR")
                importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
                prezzo = str(row.prezzo).replace(".",",")
                codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
                percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
                importo_totale += saved_importo
                imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
            except:
                # Best-effort: unparsable/missing data skips the row.
                pass
    totale = importo_totale+imposta_totale
    # print "DDT NUMERO : {0} TOTALE {1}".format(id_ddt,totale)
    return totale
def calcola_totale_iva_inclusa_da_ddt(id_ddt):
    """Return the VAT-included total of DDT *id_ddt* as an Italian-locale
    EUR string (non-ASCII characters such as the currency symbol stripped).

    Comment rows are excluded directly in the query; rows that fail a
    lookup or conversion are skipped (the exception is printed).
    """
    print "Dentro qui"
    print "DDT ID : ",id_ddt
    rows = db((db.saved_righe_in_ddt_cliente.saved_ddt_id == id_ddt) & (db.saved_righe_in_ddt_cliente.codice_articolo !="commento")).select()
    print "DDT ID : ",id_ddt
    totale = 0
    importo_totale = 0
    imposta_totale = 0
    print "sono qui"
    for row in rows:
        id_ordine = row.id_ordine
        try:
            importo = saved_importo = float(row.quantita) * float(row.prezzo)
            importo = Money(str(importo),"EUR")
            importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
            prezzo = str(row.prezzo).replace(".",",")
            # VAT rate looked up by the descriptive IVA code stored on the row.
            codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
            percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
            importo_totale += saved_importo
            imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
        except Exception,e:
            print e
            pass
    totale = importo_totale+imposta_totale
    # Format the final number in it_IT locale and drop non-ASCII characters.
    totale = Money(str(totale),"EUR")
    totale = totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
    print "Totale calcolato = ",totale
    return totale
def fatturazione_differita():
    """Step 1 of deferred invoicing: choose a customer and a date range.

    On submit: resolves the customer record by name, clears the current
    user's ddt_da_fatturare work table, stores the customer id on the
    freshly inserted fattura_cliente row, and redirects to step 2.
    Returns locals() for the view.
    """
    fields = ['nome_cliente','dal','al']
    cliente_form = SQLFORM(db.fattura_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        db(db.ddt_da_fatturare.user_id == auth.user_id).delete()
        # cliente_form.vars.id is the id of the record just inserted by process()
        row = db(db.fattura_cliente.id == cliente_form.vars.id).select().first()
        row.update_record(id_cliente = id_cliente.id)
        redirect(URL('fatturazione_differita_2',args=cliente_form.vars.id))
    return locals()
def fatturazione_istantanea():
    """Step 1 of instant invoicing: choose the customer.

    On submit: resolves the customer by name, stores its id on the new
    ddt_cliente row, clears the righe_in_fattura_istantanea work table and
    redirects to step 2 with the CUSTOMER id as argument.
    Returns locals() for the view.
    """
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_cliente = id_cliente.id)
        db(db.righe_in_fattura_istantanea).delete()
        redirect(URL('fatturazione_istantanea_2',args=id_cliente.id))
    return locals()
def nota_di_accredito():
    """Step 1 of credit-note creation: choose the customer.

    Mirrors fatturazione_istantanea(): resolves the customer by name,
    stores its id on the new ddt_cliente row, clears the
    righe_in_fattura_istantanea work table and redirects to step 2.
    Returns locals() for the view.
    """
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_cliente = id_cliente.id)
        db(db.righe_in_fattura_istantanea).delete()
        redirect(URL('nota_di_accredito_2',args=id_cliente.id))
    return locals()
def ddt_clienti():
    """Step 1 of customer-DDT creation: choose the customer.

    On submit: resolves the customer by name, stores its id on the new
    ddt_cliente row, clears the current user's righe_in_ddt_cliente work
    table and redirects to step 2 with the DDT id as argument.
    Returns locals() for the view.
    """
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_cliente = id_cliente.id)
        db(db.righe_in_ddt_cliente.user_id == auth.user_id).delete()
        redirect(URL('ddt_clienti_2',args=cliente_form.vars.id))
    return locals()
def mod_ddt_clienti():
    """Step 1 of customer-DDT modification: choose the customer.

    Unlike ddt_clienti(), no record is updated and no work table is
    cleared (both statements are intentionally commented out); the
    controller only resolves the customer and redirects to step 2 with
    the CUSTOMER id.  Returns locals() for the view.
    """
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form_mod',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        # row.update_record(id_cliente = id_cliente.id)
        # db(db.righe_in_ddt_cliente.user_id == auth.user_id).delete()
        redirect(URL('mod_ddt_clienti_2',args=id_cliente.id))
    return locals()
def ddt_fornitori():
    """Step 1 of supplier-DDT creation: choose the supplier.

    On submit: resolves the supplier by name, stores its id on the new
    ddt_fornitore row and redirects to step 2 with the DDT id as argument.
    Returns locals() for the view.
    """
    fields = ['nome_fornitore']
    fornitore_form = SQLFORM(db.ddt_fornitore,formname='fornitore_form',formstyle = 'table3cols',fields=fields)
    if fornitore_form.process().accepted:
        # print fornitore_form.vars.nome_fornitore
        id_fornitore = db(db.fornitori.nome == fornitore_form.vars.nome_fornitore).select().first()
        row = db(db.ddt_fornitore.id == fornitore_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_fornitore = id_fornitore.id)
        redirect(URL('ddt_fornitori_2',args=fornitore_form.vars.id))
    return locals()
def ddt_clienti_old():
    """Legacy customer-DDT page: grid of customer orders, each row carrying
    a 'Crea bolla' link to dettaglio_bolla."""
    crea_bolla_link = lambda row: A(XML('Crea bolla'),
                                    _class='button btn btn-default',
                                    _href=URL('dettaglio_bolla', args=row.id))
    links = [crea_bolla_link]
    colonne = db.righe_in_ordine_cliente
    fields = [colonne.n_riga, colonne.codice_articolo, colonne.quantita,
              colonne.prezzo, colonne.sconti, colonne.codice_iva,
              colonne.evasione]
    righe_in_ordine_cliente_form = SQLFORM.grid(
        db.ordine_cliente,
        formname='ordini_clienti',
        maxtextlength=100,
        create=False,
        editable=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=True,
        links=links,
    )
    return dict(righe_in_ordine_cliente_form=righe_in_ordine_cliente_form)
def gestione_piano_dei_conti():
    """Placeholder controller for the chart-of-accounts page; the view
    receives only a constant message."""
    return dict(message="ok")
def anagrafica_codici_iva():
    """Maintenance grid for the VAT-code registry (anagrafica_codici_iva)."""
    griglia = SQLFORM.grid(
        db.anagrafica_codici_iva,
        formname='codici_iva',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=True,
        exportclasses=export_classes,
    )
    # Hide the record counter rendered by the grid widget.
    griglia.element('.web2py_counter', replace=None)
    return dict(codici_iva_form=griglia)
def anagrafica_banche():
    """Maintenance grid for the bank registry (anagrafica_banche)."""
    griglia = SQLFORM.grid(
        db.anagrafica_banche,
        formname='anagrafica_banche_form',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=True,
        exportclasses=export_classes,
    )
    griglia.element('.web2py_counter', replace=None)
    # Widen two text inputs on the create/edit form; in plain grid view the
    # elements do not exist, hence the try/except.
    try:
        griglia.element('input[name=descrizione_sottoconto]')['_style'] = 'width:350px;height:25px;'
        griglia.element('input[name=descrizione]')['_style'] = 'width:350px;height:25px;'
    except:
        pass
    return dict(anagrafica_banche_form=griglia)
def anagrafica_banche_azienda():
    """Maintenance grid for the company's own banks (anagrafica_banche_azienda)."""
    griglia = SQLFORM.grid(
        db.anagrafica_banche_azienda,
        formname='anagrafica_banche_form',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=True,
    )
    griglia.element('.web2py_counter', replace=None)
    # Widen two text inputs on the create/edit form; in plain grid view the
    # elements do not exist, hence the try/except.
    try:
        griglia.element('input[name=descrizione_sottoconto]')['_style'] = 'width:350px;height:25px;'
        griglia.element('input[name=descrizione]')['_style'] = 'width:350px;height:25px;'
    except:
        pass
    return dict(anagrafica_banche_form=griglia)
def fatture_form():
    """Grid of saved invoices flagged as requiring RiBa collection.

    NOTE(review): as a side effect, EVERY request runs a data 'patch' that
    rewrites data_fattura to the last day of its month for all invoices
    whose scadenza falls after 1999-05-17 — confirm this one-off migration
    is still wanted on each page load.

    Returns locals() for the view (fatture_form, links, fields, ...).
    """
    fields = [db.fatture_salvate.data_fattura,db.fatture_salvate.numero_fattura,db.fatture_salvate.totale,db.fatture_salvate.nome_cliente,db.fatture_salvate.scadenza]
    # Patch to normalise each invoice date to the end of its month.
    x = datetime.datetime(1999, 5, 17)
    fatture=db(db.fatture_salvate.scadenza > x).select()
    for fattura in fatture:
        original_start_date = fattura.data_fattura
        if original_start_date is not None:
            day_start,day_end = monthrange(original_start_date.year, original_start_date.month)
            d = str(day_end)+"/"+str(original_start_date.month)+"/"+str(original_start_date.year)
            start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
            # print original_start_date,start_date
            fattura.data_fattura = start_date
            fattura.update_record()
    # In edit mode, lock down the fields the user must not touch.
    if len(request.args) > 1 and ('edit' in request.args):
        db.fatture_salvate.numero_fattura.writable=False
        db.fatture_salvate.id_ddt.writable=False
        db.fatture_salvate.id_ddt.readable=False
        # BUG FIX: the id_cliente writable/readable pair was set twice;
        # the redundant duplicate statements were removed.
        db.fatture_salvate.id_cliente.writable=False
        db.fatture_salvate.id_cliente.readable=False
        db.fatture_salvate.richiede_riba.writable=False
        db.fatture_salvate.richiede_riba.readable=False
        db.fatture_salvate.riba_emessa.writable=False
        db.fatture_salvate.riba_emessa.readable=False
    links=[lambda row: BUTTON("Aggiungi fattura",_onclick=XML('aggiungiFattura('+str(row.id)+')'),_class='button btn btn-default')]
    fatture_form = SQLFORM.grid(db.fatture_salvate.richiede_riba=='T',formname='fatture',maxtextlength=100,create=False, deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=True,fields=fields,links=links,exportclasses=export_classes)
    return locals()
@service.jsonrpc
@service.jsonrpc2
def successivo_riba(banca):
    """RPC: remember the company bank chosen for the RiBa emission.

    When no invoice has been selected, flashes a message and raises
    ZeroDivisionError (via 1/0) so the JSON-RPC layer reports failure.
    Otherwise temp_banca is reset to hold only *banca*.
    """
    nessuna_selezione = db(db.fatture_scelte).isempty()
    if nessuna_selezione:
        response.flash = "Selezionare almeno una fattura"
        return 1 / 0
    db(db.temp_banca).delete()
    db.temp_banca.insert(banca=banca)
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def accorpa(id, val):
    """RPC: toggle the 'accorpa' (merge) flag on a chosen invoice.

    Any *val* whose string form contains "True" sets the flag; everything
    else clears it.
    """
    riga = db(db.fatture_scelte.id == id).select().first()
    riga.update_record(accorpa="True" in str(val))
    return "ok"
def crea_indici_riba():
    """Build the RiBa emission index from the user's chosen invoices.

    Returns a list of [id_cliente, [id_fattura, ...]] pairs.  When a
    customer has at least two invoices flagged accorpa == 'T', all of that
    customer's flagged invoices are merged into one entry; otherwise each
    invoice gets its own entry.  Duplicate entries are suppressed by the
    final membership check.
    """
    cliente = []
    lista_riba=[]
    fatture_accorpate = []
    fatture=db(db.fatture_scelte).select()
    for f in fatture:
        id_cliente = f.id_cliente
        fatture_accorpate = []
        lista_fatture = []
        if db((db.fatture_scelte.id_cliente == id_cliente) & (db.fatture_scelte.accorpa == 'T')).count() < 2:
            """
            Nessuna fattura da accorpare per questo cliente
            """
            # No merge for this customer: emit the invoice on its own.
            lista_fatture.append(f.id_fattura)
            pass
        else:
            # Merge: collect every flagged invoice of this customer.
            da_accorpare = db((db.fatture_scelte.id_cliente == id_cliente) & (db.fatture_scelte.accorpa == 'T')).select()
            for item in da_accorpare:
                if not item in lista_fatture:
                    lista_fatture.append(item.id_fattura)
        cliente = []
        cliente.append(id_cliente)
        cliente.append(lista_fatture)
        # Merged groups appear once even though the outer loop visits every
        # invoice of the group: identical [cliente, fatture] pairs are skipped.
        if not cliente in lista_riba:
            lista_riba.append(cliente)
    return lista_riba
def ritorna_dettaglio_fattura(id_fattura):
    """Return a one-line HTML summary (number, date, total, due date) of a
    saved invoice."""
    fattura = db(db.fatture_salvate.id == id_fattura).select().first()
    return "Fattura numero {0} Del {1} Tot. {2} <b>Scadenza</b> {3}".format(
        fattura.numero_fattura,
        fattura.data_fattura.strftime("%d/%m/%Y"),
        ritorna_prezzo_europeo(fattura.totale),
        fattura.scadenza.strftime("%d/%m/%Y"))
def ritorna_nome_cliente_da_id(id):
    """Return the name of the customer whose record id is *id*."""
    cliente = db(db.clienti.id == id).select().first()
    return cliente.nome
def ritorna_abi_nostra_banca_scelta():
    """Return the ABI code of the company bank stored in temp_banca."""
    banca_scelta = db(db.temp_banca).select().first().banca
    banca = db(db.anagrafica_banche_azienda.descrizione == banca_scelta).select().first()
    return banca.codice_abi
def ritorna_cab_nostra_banca_scelta():
    """Return the CAB code of the company bank stored in temp_banca."""
    banca_scelta = db(db.temp_banca).select().first().banca
    banca = db(db.anagrafica_banche_azienda.descrizione == banca_scelta).select().first()
    return banca.codice_cab
def ritorna_scadenza_e_totale_fattura_per_riba(id_fattura):
    """Return (scadenza as a ddmmyy string, totale) of a saved invoice,
    in the shape the RiBa record builder expects."""
    fattura = db(db.fatture_salvate.id == id_fattura).select().first()
    return fattura.scadenza.strftime("%d%m%y"), fattura.totale
def ritorna_abi_cab_da_cliente_id(cliente_id):
    """Return the (ABI, CAB) codes of the bank linked to customer *cliente_id*.

    Falls back to empty strings when the bank is missing from
    anagrafica_banche instead of raising.
    """
    # print cliente_id
    codice_banca = db(db.clienti.id == cliente_id).select().first().codice_banca
    codice_abi = ""
    codice_cab = ""
    try:
        d = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
        codice_abi = d.codice_abi
        codice_cab = d.codice_cab
    except:
        pass
    # BUG FIX: this used to return d.codice_abi, d.codice_cab — which raised
    # NameError/AttributeError whenever the lookup above failed, defeating the
    # empty-string fallback the try/except was written for.
    return codice_abi, codice_cab
def truncate_float(number, length):
    """Truncate *number* (toward zero, no rounding) to *length* decimal
    places; *length* must be an integer."""
    scale = pow(10, length)
    return float(int(number * scale)) / scale
def crea_file_riba():
    """Build the RiBa (CBI) flow file riba.txt under the app's static folder.

    Each [customer, invoices] entry produced by crea_indici_riba() becomes one
    CBI 'disposal' made of records 14/20/30/40/50/51/70; an IB header and EF
    footer wrap the flow.  The running receipt counter is persisted in
    numero_disposizioni_riba so numbers stay unique across files.
    """
    try:
        numero_disposizione = db(db.numero_disposizioni_riba).select().first().numero
        numero_disposizione = int(numero_disposizione)
    except:
        # First run ever (or empty table): start the receipt counter at 1.
        numero_disposizione = 1
    """Contenitore per il flusso CBI"""
    flow = wrapper.Flow()
    flow.header = wrapper.Record('IB')
    flow.footer = wrapper.Record('EF')
    # SIA code assigned to the issuing company (hard-coded company constant).
    codice_assegnato_dalla_sia_alla_azienda_emittente ="60I33"
    codice_abi_banca_assuntrice = ritorna_abi_nostra_banca_scelta()
    codice_cab_banca_assuntrice = ritorna_cab_nostra_banca_scelta()
    # Creation date as ddmmyy.
    data_creazione = datetime.datetime.now().date().strftime("%d/%m/%y").replace("/","")
    nome_supporto = "OpenGest"
    codice_divisa = "E"
    flow.header['mittente'] = codice_assegnato_dalla_sia_alla_azienda_emittente
    flow.header['ricevente'] = codice_abi_banca_assuntrice
    flow.header['data_creazione'] = data_creazione
    flow.header['nome_supporto'] = nome_supporto
    flow.header['codice_divisa'] = codice_divisa
    flow.footer['mittente']=codice_assegnato_dalla_sia_alla_azienda_emittente
    flow.footer['ricevente']=codice_abi_banca_assuntrice
    flow.footer['data_creazione']=data_creazione
    flow.footer['nome_supporto']=nome_supporto
    flow.footer['codice_divisa']=codice_divisa
    numero_emissioni = crea_indici_riba()
    # print "NUMERO EMISSIONI = {0} ".format(len(numero_emissioni))
    flow.footer['numero_disposizioni']=str(len(numero_emissioni)).zfill(7)
    totalissimo = 0
    flow.disposals = []
    for numero_progressivo in range(1,len(numero_emissioni) +1):
        """Contiene tutti e 7 i record"""
        disposizione = wrapper.Disposal()
        # print "QUI"
        """instanza ai vari record cbi"""
        first_record = wrapper.Record('14')
        second_record = wrapper.Record('20')
        third_record = wrapper.Record('30')
        fourth_record = wrapper.Record('40')
        fifth_record = wrapper.Record('50')
        fifty_one = wrapper.Record('51')
        seventieth_record = wrapper.Record('70')
        emissione_corrente = numero_emissioni[numero_progressivo - 1]
        cliente_id = emissione_corrente[0]
        fatture = emissione_corrente[1]
        """
        Raccolta dati per il record 14 first_record
        """
        codice_abi_domiciliaria,codice_cab_domiciliaria=ritorna_abi_cab_da_cliente_id(cliente_id)
        codice_cliente_debitore = cliente_id
        # print ritorna_abi_cab_da_cliente_id
        importo_della_ricevuta_in_centesimi = 0
        riferimento_fattura = ""
        # Sum the invoices of this disposal and build the reference string.
        for id_fattura in fatture:
            data_pagamento,totale = ritorna_scadenza_e_totale_fattura_per_riba(id_fattura)
            importo_della_ricevuta_in_centesimi += float(totale)
            # NOTE(review): this adds the RUNNING partial sum each iteration,
            # so a multi-invoice group inflates totalissimo — confirm whether
            # the grand total is meant to be the sum of final receipt amounts.
            totalissimo += importo_della_ricevuta_in_centesimi
            riferimento_fattura+= db(db.fatture_salvate.id == id_fattura).select().first().numero_fattura+" del "+db(db.fatture_salvate.id == id_fattura).select().first().data_fattura.strftime("%d/%m/%Y") + " "
        # Amount in cents: drop the decimal point and left-pad to 13 digits.
        importo_della_ricevuta_in_centesimi = '%.2f' % round(importo_della_ricevuta_in_centesimi,2)
        importo_della_ricevuta_in_centesimi = importo_della_ricevuta_in_centesimi.replace(".","").zfill(13)
        # print "importo : {0}".format(importo_della_ricevuta_in_centesimi)
        first_record['numero_progressivo']=str(numero_progressivo).zfill(7)
        first_record['data_pagamento']=data_pagamento
        first_record['importo']=str(importo_della_ricevuta_in_centesimi)
        first_record['codice_abi_banca']=codice_abi_banca_assuntrice
        first_record['cab_banca']=codice_cab_banca_assuntrice
        first_record['codice_abi_domiciliaria']=codice_abi_domiciliaria
        first_record['codice_cab_domiciliaria']=codice_cab_domiciliaria
        first_record['codice_azienda']=codice_assegnato_dalla_sia_alla_azienda_emittente
        first_record['codice_cliente_debitore']=codice_cliente_debitore
        first_record['codice_divisa']=codice_divisa
        first_record['causale']="30000"
        first_record['segno']="-"
        first_record['tipo_codice']="4"
        # Record 20: creditor (company) identification segments.
        second_record['numero_progressivo']=str(numero_progressivo).zfill(7)
        second_record['1_segmento']="Microcarp"
        second_record['2_segmento']="Strada statale 416"
        second_record['3_segmento']="26020 Castelleone (CR)"
        second_record['4_segmento']="Italia"
        dati_cliente = db(db.clienti.id == cliente_id).select().first()
        # Record 30: debtor (customer) identification.
        third_record['numero_progressivo'] = str(numero_progressivo).zfill(7)
        third_record['codice_fiscale_cliente'] = dati_cliente.codice_fiscale
        third_record['1_segmento'] = dati_cliente.nome[:27]
        third_record['2_segmento'] = ""
        # Record 40: debtor address.
        fourth_record['numero_progressivo'] = str(numero_progressivo).zfill(7)
        fourth_record['indirizzo'] = dati_cliente.indirizzo
        fourth_record['cap'] = dati_cliente.cap
        fourth_record['comune_e_sigla_provincia'] = dati_cliente.provincia
        fourth_record['completamento_indirizzo'] = ""
        fourth_record['codice_paese'] = "IT"
        # Record 50: invoice reference (clipped to 30 chars + ellipsis).
        riferimento_fattura =(riferimento_fattura[:30] + '..') if len(riferimento_fattura) > 30 else riferimento_fattura
        fifth_record['numero_progressivo'] =str(numero_progressivo).zfill(7)
        fifth_record['1_segmento'] = "R.F. " + riferimento_fattura
        fifth_record['2_segmento'] = "IMPORTO " + importo_della_ricevuta_in_centesimi
        fifth_record['codifica_fiscale_creditore'] = str(dati_cliente.partita_iva)
        # Record 51: receipt number and creditor name.
        fifty_one['numero_progressivo'] = str(numero_progressivo).zfill(7)
        fifty_one['numero_ricevuta'] = str(numero_disposizione).zfill(10)
        fifty_one['denominazione_creditore'] = "MICROCARP S.R.L."
        seventieth_record['numero_progressivo'] = str(numero_progressivo).zfill(7)
        numero_disposizione +=1
        """ ALLA FINE DI TUTTI I RECORDS """
        disposizione.records.append(first_record)
        disposizione.records.append(second_record)
        disposizione.records.append(third_record)
        disposizione.records.append(fourth_record)
        disposizione.records.append(fifth_record)
        disposizione.records.append(fifty_one)
        disposizione.records.append(seventieth_record)
        flow.disposals.append(disposizione)
        disposizione = None
    # print "TOTALISSIMO {0}".format(totalissimo)
    # totalissimo = '%.2f' % totalissimo
    # totalissimo = str(totalissimo)[:]
    # Grand total in cents, left-padded to 15 digits, for the EF footer.
    totalissimo = str(truncate_float(totalissimo,2))
    # print "TOTALISSIMO {0}".format(totalissimo)
    totalissimo = totalissimo.replace(".","").zfill(15)
    flow.footer['tot_importi_negativi']=totalissimo
    flow.footer['tot_importi_positivi']="".zfill(15)
    # 7 records per disposal plus header and footer.
    numero_record = str((len(numero_emissioni) * 7)+2).zfill(7)
    flow.footer['numero_record']=numero_record
    filename = os.getcwd()+"/applications/gestionale/static/"+"riba.txt"
    try:
        os.remove(filename)
    except:
        pass
    flow.writefile(filename)
    # print "LUNGHEZZA DISPOSIZIONE : ",len(flow.disposals)
    # Persist the advanced receipt counter for the next file.
    db.numero_disposizioni_riba.insert(numero=str(numero_disposizione))
def genera_riba():
    """Generate the RiBa (CBI) file and return its text as an HTTP attachment.

    Builds riba.txt via crea_file_riba(), then returns its contents with
    download headers so the browser saves it as 'riba.txt'.
    """
    crea_file_riba()
    nomefile = "riba.txt"
    filename = os.getcwd()+"/applications/gestionale/static/"+nomefile
    response.headers['Content-Type'] = gluon.contenttype.contenttype(filename)
    response.headers['Content-Disposition'] = "attachment; filename=%s" % nomefile
    # Read the generated file directly; the previous cStringIO round-trip
    # (Python-2 only) copied the bytes without adding anything.
    with open(filename, "r") as riba_file:
        return riba_file.read()
def emissione_riba_3():
    """Step 3 of RiBa emission: summary table and validation.

    Renders an HTML table (customer / invoice details / total) for each
    entry produced by crea_indici_riba(), validating that each customer's
    bank exists and has 5-character ABI and CAB codes.  The 'create file'
    button is shown only when no validation error occurred.
    Returns locals() for the view.
    """
    banca_scelta = db(db.temp_banca).select().first().banca
    try:
        numero_disposizione = db(db.numero_disposizioni_riba).select().first().numero
    except:
        numero_disposizione = 1
    lista_riba = crea_indici_riba()
    html ="""<table id="resoconto" class="table table-bordered">"""
    html += """<thead>"""
    html += """<tr>"""
    html += """<th>"""
    html += "Cliente"
    html += """</th>"""
    html += """<th>"""
    html += "Dettaglio"
    html += """</th>"""
    html += """<th>"""
    html += "Totale"
    html += """</th>"""
    html += """</tr>"""
    html += """</thead>"""
    html += """<tbody>"""
    totale_distinta=0
    errore = False
    for item in lista_riba:
        html += """<tr>"""
        html += """<td>"""+ritorna_nome_cliente_da_id(item[0]) + """</td>"""
        html += """<td>"""
        # Validate the customer's bank: it must exist in anagrafica_banche
        # and carry 5-character ABI/CAB codes.
        banca_cliente = db(db.clienti.id==item[0]).select().first().codice_banca
        dati_banca_cliente = db(db.anagrafica_banche.descrizione == banca_cliente).select().first()
        if dati_banca_cliente is not None:
            abi = dati_banca_cliente.codice_abi
            cab = dati_banca_cliente.codice_cab
            if abi is None or len(abi) !=5:
                response.flash="La banca {0} collegata al cliente {1} non ha il codice ABI corretto".format(dati_banca_cliente.descrizione,ritorna_nome_cliente_da_id(item[0]))
                errore = True
            if cab is None or len(cab) !=5:
                response.flash="La banca {0} collegata al cliente {1} non ha il codice CAB corretto".format(dati_banca_cliente.descrizione,ritorna_nome_cliente_da_id(item[0]))
                errore = True
        else:
            response.flash="La banca {0} collegata al cliente {1} non è presente in anagrafica".format(banca_cliente,ritorna_nome_cliente_da_id(item[0]))
            errore = True
        # One detail line per invoice in the group; totals accumulated.
        totale = 0
        for fatture in item[1]:
            html += ritorna_dettaglio_fattura(fatture) +"<br>"
            totale += float(db(db.fatture_salvate.id ==fatture).select().first().totale)
        html += """</td>"""
        html += """<td>"""
        html += ritorna_prezzo_europeo(totale)
        html += """</td>"""
        totale_distinta += totale
        html += """</tr>"""
    # print "Cliente = ",ritorna_nome_cliente_da_id(item[0]) , "Fatture = ",item[1]
    html += """</tbody>"""
    html +="""</table>"""
    html=XML(html)
    indietro = avanti =""
    # Navigation buttons only when every customer's bank data validated.
    if not errore:
        indietro = A(BUTTON("Indietro"),_href=URL('emissione_riba_2'))
        avanti = A(BUTTON("Crea e scarica file Riba"),_href=URL('genera_riba'))
    totale_distinta = ritorna_prezzo_europeo(totale_distinta)
    return locals()
def return_radio_button(id):
    """Return an XML checkbox (id 'check<id>') whose onclick calls the
    accorpa(<id>) JavaScript helper.

    Despite the name, the widget is a checkbox, not a radio button.
    """
    # BUG FIX: an unreachable 'pass' statement after the return was removed.
    return XML("<input type='checkbox' id ='check"+str(id)+"' onclick='accorpa("+str(id)+");'></input>")
def emissione_riba_2():
    """Step 2 of RiBa emission: grid of the invoices chosen by the user.

    A virtual field renders a per-row checkbox that toggles the 'accorpa'
    (merge) flag through the accorpa() JSON-RPC endpoint.
    Returns locals() for the view (riba_form, button).
    """
    db.fatture_scelte.a = Field.Virtual('accorpa',lambda row: return_radio_button(row.fatture_scelte.id))
    # db.fatture_scelte.a = Field.Virtual('radio','boolean')
    fields=[db.fatture_scelte.numero_fattura,db.fatture_scelte.totale,db.fatture_scelte.cliente,db.fatture_scelte.scadenza,db.fatture_scelte.a]
    # db.fatture_scelte.id.readable=False;
    riba_form = SQLFORM.grid(db.fatture_scelte.user_id == auth.user_id,formname='riba_form',maxtextlength=100,create=False, deletable=True,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=False, fields=fields)
    button = A(BUTTON("Successivo"),_href=URL('emissione_riba_3'))
    return locals()
def emissione_riba():
    """Step 1 of RiBa emission: reset the user's invoice selection.

    Clears fatture_scelte for the current user and collects the distinct
    company bank names for the view.  Returns locals().
    """
    db(db.fatture_scelte.user_id == auth.user_id).delete()
    # NOTE(review): Set() — presumably an alias (e.g. sets.Set) imported
    # earlier in this file; the built-in set() would be the modern equivalent.
    banca_azienda = Set()
    b = db(db.anagrafica_banche_azienda).select()
    for e in b:
        banca_azienda.add(e.descrizione)
    return locals()
def ritorna_tipo_pagamento_da_fattura(fattura_id):
    """Return (pagamento, scadenza) for the saved invoice *fattura_id*.

    NOTE(review): eval() on a DB field — id_ddt is assumed to hold the repr
    of a list of DDT ids; ast.literal_eval would be safer if the stored
    format allows it.  TODO confirm.
    NOTE(review): the loop overwrites pagamento on each iteration, so the
    value returned comes from the LAST ddt in the list; a failed lookup on
    the last iteration also blanks scadenza.
    """
    row = db(db.fatture_salvate.id == fattura_id).select().first()
    scadenza = row.scadenza
    ids = eval(row.id_ddt)
    for ddt in ids:
        try:
            id_ordine = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt).select().first().id_ordine
            pagamento = db(db.ordine_cliente.id == id_ordine).select().first().pagamento
        except:
            # print "ERRORE FATTURA ID ",fattura_id
            pagamento = scadenza =""
    return pagamento,scadenza
def anagrafica_clienti():
    """Maintenance grid for the customer registry (clienti)."""
    griglia = SQLFORM.grid(
        db.clienti,
        formname='clienti',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=True,
        exportclasses=export_classes,
    )
    griglia.element('.web2py_counter', replace=None)
    # Widen the bank selector and delivery-address inputs on the edit form;
    # in plain grid view the elements do not exist, hence the try/except.
    try:
        griglia.element('select[name=codice_banca]')['_style'] = 'width:350px;height:25px;'
        griglia.element('input[name=luogo_consegna_1]')['_style'] = 'width:350px;height:25px;'
        griglia.element('input[name=luogo_consegna_2]')['_style'] = 'width:350px;height:25px;'
        griglia.element('input[name=luogo_consegna_3]')['_style'] = 'width:350px;height:25px;'
        griglia.element('input[name=luogo_consegna_4]')['_style'] = 'width:350px;height:25px;'
        griglia.element('input[name=luogo_consegna_5]')['_style'] = 'width:350px;height:25px;'
    except:
        pass
    return dict(clienti_form=griglia)
def anagrafica_fornitori():
    """Maintenance grid for the supplier registry (fornitori)."""
    griglia = SQLFORM.grid(
        db.fornitori,
        formname='fornitori',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=4,
        formstyle='table3cols',
        csv=True,
        exportclasses=export_classes,
    )
    griglia.element('.web2py_counter', replace=None)
    return dict(fornitori_form=griglia)
def gestione_codici_causali():
    """Maintenance grid for the transaction-reason codes (codici_causali)."""
    griglia = SQLFORM.grid(
        db.codici_causali,
        formname='causali',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
    )
    griglia.element('.web2py_counter', replace=None)
    return dict(form=griglia)
def gestione_codici_pagamenti():
    """Maintenance grid for the payment codes (codici_pagamenti);
    rows can be edited but not deleted."""
    griglia = SQLFORM.grid(
        db.codici_pagamenti,
        formname='pagamenti',
        maxtextlength=100,
        create=True,
        editable=True,
        deletable=False,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
    )
    griglia.element('.web2py_counter', replace=None)
    return dict(form=griglia)
def anagrafica_piano_dei_conti():
    """Maintenance grid for the chart of accounts (anagrafica_piano_dei_conti)."""
    griglia = SQLFORM.grid(
        db.anagrafica_piano_dei_conti,
        formname='anagrafica_piano_dei_conti',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
    )
    return dict(anagrafica_piano_dei_conti_form=griglia)
def index():
    """Home-page controller: flashes a greeting and passes an empty
    message to the view."""
    response.flash = T("Home page")
    return dict(message=T(''))
@service.jsonrpc
@service.jsonrpc2
def return_listini(nome_cliente,tipo):
    """RPC: return, as JSON, the price-list rows for customer *nome_cliente*
    with list type *tipo*.

    BUG FIX: the two conditions used to be passed as separate positional
    arguments to db(); the web2py DAL takes the second positional argument
    as ignore_common_filters, so the tipologia_listino filter was never
    applied.  The conditions are now combined with '&'.
    """
    nomi_listini = db((db.anagrafica_listini.nome_cliente == nome_cliente) &
                      (db.anagrafica_listini.tipologia_listino == tipo)).select()
    return nomi_listini.as_json()
@service.jsonrpc
@service.jsonrpc2
def return_pagamenti(*args):
    """RPC: return the payment terms for a customer or a supplier.

    args[0] is the name; args[1] selects the table — any value containing
    "cliente" means db.clienti, anything else db.fornitori.
    """
    nome = args[0]
    if "cliente" in args[1]:
        tabella = db.clienti
    else:
        tabella = db.fornitori
    record = db(tabella.nome == nome).select().first()
    return record["pagamento"]
@service.jsonrpc
@service.jsonrpc2
def aggiorna_quantita(id_riga_ordine,codice_articolo,quantita_prodotta):
    """RPC: add a produced quantity to an article's stock and reserve it.

    The produced amount is summed into anagrafica_articoli.giacenza and a
    riserva_quantita row ties it to the originating order line, so the
    article registry can also show the reserved quantity.  When a DDT is
    issued, the matching riserva_quantita rows must be removed.
    """
    articolo = db(db.anagrafica_articoli.codice_articolo == str(codice_articolo)).select().first()
    nuova_giacenza = int(articolo.giacenza) + int(quantita_prodotta)
    articolo.update_record(giacenza=str(nuova_giacenza))
    db.riserva_quantita.insert(codice_articolo=codice_articolo,
                               quantita=quantita_prodotta,
                               id_riga_ordine=id_riga_ordine,
                               user_id=auth.user_id)
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def riserva_giacenza(id_riga_ordine,da_riservare):
    """RPC: reserve *da_riservare* pieces of stock against an order line by
    inserting a riserva_quantita row; the stock level itself is untouched."""
    riga = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
    codice_articolo = riga.codice_articolo
    id_ordine_cliente = riga.id_ordine_cliente
    # Article lookup kept for parity with the sibling RPC endpoints.
    data_articolo = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first()
    db.riserva_quantita.insert(codice_articolo=codice_articolo,
                               quantita=da_riservare,
                               id_riga_ordine=id_riga_ordine,
                               user_id=auth.user_id)
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def disdire_giacenza(id_riga_ordine,da_riservare):
    """RPC: cancel *da_riservare* reserved pieces on an order line.

    Refuses (by raising ZeroDivisionError via 1/0, which the RPC layer
    reports as failure) to release more pieces than are currently reserved;
    otherwise a negative riserva_quantita row offsets the reservation.
    """
    riga = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
    codice_articolo = riga.codice_articolo
    id_ordine_cliente = riga.id_ordine_cliente
    # Article lookup kept for parity with the sibling RPC endpoints.
    data_articolo = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first()
    prenotato = int(ritorna_totale_prenotazione_da_codice_articolo(codice_articolo))
    if prenotato - int(da_riservare) < 0:
        return 1 / 0
    db.riserva_quantita.insert(codice_articolo=codice_articolo,
                               quantita=int(da_riservare) * -1,
                               id_riga_ordine=id_riga_ordine,
                               user_id=auth.user_id)
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def aggiorna_giacenza(id_riga_ordine,da_riservare):
    """RPC: overwrite an article's stock level with *da_riservare*.

    The article is resolved through the given order line.  A negative or
    non-numeric value raises ZeroDivisionError (via 1/0), which the RPC
    layer reports as failure.
    """
    riga = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
    codice_articolo = riga.codice_articolo
    id_ordine_cliente = riga.id_ordine_cliente
    data_articolo = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first()
    try:
        nuova_giacenza = int(da_riservare)
        if nuova_giacenza < 0:
            return 1 / 0
        data_articolo.update_record(giacenza=str(nuova_giacenza))
    except:
        return 1 / 0
    return "ok"
def return_dettagli_articolo_da_riga_ordine():
    """Collect article/order/stock details for the order line passed in
    request.vars['id_riga_ordine'] and hand everything to the view via
    locals().

    On any failure the broad except sets errore=True and blanks every
    expected view variable, then returns locals() early so the view still
    renders.
    """
    errore = False
    riga_evasa = False
    try:
        id_riga_ordine =request.vars['id_riga_ordine']
        data = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
        codice_articolo = data.codice_articolo
        id_ordine_cliente = data.id_ordine_cliente
        quantita_ordine = data.quantita
        data_articolo = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first()
        ubicazione = data_articolo.ubicazione
        if ubicazione is None:
            ubicazione = "Nessuna"
        data_ordine = db(db.ordine_cliente.id ==id_ordine_cliente).select().first()
        codice_ordine = data_ordine.ultimo_codice_ordine
        nome_cliente = data_ordine.nome_cliente
        riferimento_ordine = data_ordine.riferimento_ordine_cliente
        data_inserimento = data_ordine.data_inserimento
        descrizione = data_articolo.descrizione
        giacenza = data_articolo.giacenza
        quantita_saldo = ritorna_quantita_saldo(id_riga_ordine)
        prenotato = ritorna_totale_prenotazione_da_codice_articolo_e_riga_id(codice_articolo,id_riga_ordine)
        # print "PRENOTATO = ",prenotato
        # print "GIACENZA = ",giacenza
        giacenza_non_riservata = int(giacenza) - int(prenotato)
        # print "NON RISERVATA = ",giacenza_non_riservata
        # How many pieces still need to be produced; if negative/zero, the
        # variable is replaced by a human-readable "completed" message.
        produzione_da_riservare_per_completare_la_produzione = int(quantita_saldo) - int(prenotato)
        if produzione_da_riservare_per_completare_la_produzione < 1:
            produzione_da_riservare_per_completare_la_produzione = "PRODUZIONE COMPLETATA\n" + "SURPLUS DI " +str(abs(produzione_da_riservare_per_completare_la_produzione)) + " ARTICOLI"
        """
        """
        if int(quantita_saldo) <1:
            quantita_saldo = "Quantità richiesta raggiunta"
        if riga_completata(id_riga_ordine):
            riga_evasa = True
        ddts = return_ddts_for_row_id(id_riga_ordine)
    except Exception, e:
        # print e
        # Fallback: blank every variable the view expects and flag the error.
        errore = True
        id_riga_ordine=""
        codice_articolo = ""
        descrizione =""
        giacenza = ""
        cliente = ""
        codice_ordine = ""
        quantita_ordine =""
        prenotato =""
        giacenza_non_riservata =""
        produzione_da_riservare_per_completare_la_produzione=""
        riferimento_ordine=""
        data_inserimento=""
        quantita_saldo=""
        giacenza_non_riservata=""
        produzione_da_riservare_per_completare_la_produzione=""
        ubicazione=""
        return locals()
    return locals()
def return_dettagli_articolo_da_riga_ordine_per_cartellini():
    """Variant of return_dettagli_articolo_da_riga_ordine() used when
    printing labels (cartellini): additionally fetches quantita_prodotta
    for the order line.  Details go to the view via locals().

    On any failure the broad except sets errore=True and blanks every
    expected view variable, then returns locals() early.
    """
    errore = False
    riga_evasa = False
    try:
        id_riga_ordine =request.vars['id_riga_ordine']
        data = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
        codice_articolo = data.codice_articolo
        id_ordine_cliente = data.id_ordine_cliente
        quantita_ordine = data.quantita
        data_articolo = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first()
        ubicazione = data_articolo.ubicazione
        if ubicazione is None:
            ubicazione = "Nessuna"
        data_ordine = db(db.ordine_cliente.id ==id_ordine_cliente).select().first()
        codice_ordine = data_ordine.ultimo_codice_ordine
        nome_cliente = data_ordine.nome_cliente
        riferimento_ordine = data_ordine.riferimento_ordine_cliente
        data_inserimento = data_ordine.data_inserimento
        descrizione = data_articolo.descrizione
        giacenza = data_articolo.giacenza
        quantita_saldo = ritorna_quantita_saldo(id_riga_ordine)
        prenotato = ritorna_totale_prenotazione_da_codice_articolo_e_riga_id(codice_articolo,id_riga_ordine)
        # print "PRENOTATO = ",prenotato
        # print "GIACENZA = ",giacenza
        giacenza_non_riservata = int(giacenza) - int(prenotato)
        # print "NON RISERVATA = ",giacenza_non_riservata
        # How many pieces still need to be produced; if negative/zero, the
        # variable is replaced by a human-readable "completed" message.
        produzione_da_riservare_per_completare_la_produzione = int(quantita_saldo) - int(prenotato)
        if produzione_da_riservare_per_completare_la_produzione < 1:
            produzione_da_riservare_per_completare_la_produzione = "PRODUZIONE COMPLETATA\n" + "SURPLUS DI " +str(abs(produzione_da_riservare_per_completare_la_produzione)) + " ARTICOLI"
        """
        """
        if int(quantita_saldo) <1:
            quantita_saldo = "Quantità richiesta raggiunta"
        if riga_completata(id_riga_ordine):
            riga_evasa = True
        ddts = return_ddts_for_row_id(id_riga_ordine)
        quantita_prodotta = return_quantity_for_row_id(id_riga_ordine)
    except Exception, e:
        # print e
        # Fallback: blank every variable the view expects and flag the error.
        errore = True
        id_riga_ordine=""
        codice_articolo = ""
        descrizione =""
        giacenza = ""
        cliente = ""
        codice_ordine = ""
        quantita_ordine =""
        prenotato =""
        giacenza_non_riservata =""
        produzione_da_riservare_per_completare_la_produzione=""
        riferimento_ordine=""
        data_inserimento=""
        quantita_saldo=""
        giacenza_non_riservata=""
        produzione_da_riservare_per_completare_la_produzione=""
        ubicazione=""
        return locals()
    return locals()
# return_dettagli_articolo_da_riga_ordine
def stampa_cartellini_1():
    """Expose an editable grid over anagrafica_articoli (tag-printing page)."""
    grid_options = dict(
        formname='articoli1',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=False,
        user_signature=True,
    )
    articoli_form = SQLFORM.grid(db.anagrafica_articoli, **grid_options)
    return dict(articoli_form=articoli_form)
def aggiorna_giacenze():
    """Expose an editable grid over anagrafica_articoli (stock-update page)."""
    articoli_form = SQLFORM.grid(
        db.anagrafica_articoli,
        formname='articoli1',
        maxtextlength=100,
        create=True,
        deletable=True,
        searchable=True,
        sortable=True,
        paginate=5,
        formstyle='table3cols',
        csv=False,
        user_signature=True,
    )
    return dict(articoli_form=articoli_form)
@service.jsonrpc
@service.jsonrpc2
def return_description(cod):
    """JSON-RPC: return the description of article code *cod*.

    Returns "" when the code does not exist (the original raised
    AttributeError on ``None.descrizione`` for unknown codes).
    """
    row = db(db.anagrafica_articoli.codice_articolo == cod).select().first()
    # .first() is None when no row matches; guard before attribute access.
    return row.descrizione if row is not None else ""
@service.jsonrpc
@service.jsonrpc2
def return_price(articolo, numero, listino, cliente):
    """JSON-RPC: tiered customer price for *numero* pieces of *articolo*.

    Price tiers are rows in articolo_in_listino ordered by numero_pezzi;
    the price of the tier whose (start, end] range contains *numero* wins.
    Returns "" when no tier matches (price stays 0).
    """
    prezzo_corrente = 0
    start = 0
    end = 0
    # Client names arrive HTML-escaped over RPC; unescape before matching.
    h = HTMLParser()
    cliente = h.unescape(cliente)
    # BUG FIX: the original called db(q1, q2) where q2 was
    # ``db.articolo_in_listino == listino`` — a Table-vs-value comparison
    # passed into the DAL's second positional parameter
    # (ignore_common_filters), i.e. not a filter at all. Filter on the
    # ``listino`` field; the in-loop check is kept for parity with
    # return_price_fornitori.
    rows = db((db.articolo_in_listino.nome_cliente == cliente) &
              (db.articolo_in_listino.listino == listino)).select()
    for row in rows:
        if row['listino'] == listino:
            if row['codice_articolo'] == articolo:
                end = int(row['numero_pezzi'])
                if (int(numero) > start) and (int(numero) <= end):
                    prezzo_corrente = float(row['prezzo'])
                start = end
    if prezzo_corrente == 0:
        prezzo_corrente = ""
    return prezzo_corrente
@service.jsonrpc
@service.jsonrpc2
def return_price_fornitori(articolo, numero, listino, cliente):
    """JSON-RPC: tiered supplier price for *numero* pieces of *articolo*.

    Walks the supplier's price-list rows; each matching row defines a tier
    ending at its numero_pezzi. The price of the tier containing *numero*
    is returned; "" when no tier matches.
    """
    prezzo_corrente = 0
    tier_start = 0
    # Supplier names arrive HTML-escaped over RPC; unescape before matching.
    parser = HTMLParser()
    cliente = parser.unescape(cliente)
    supplier_rows = db(db.articolo_in_listino_fornitori.nome_fornitore == cliente).select()
    for riga in supplier_rows:
        if riga['listino'] != listino:
            continue
        if riga['codice_articolo'] != articolo:
            continue
        tier_end = int(riga['numero_pezzi'])
        if tier_start < int(numero) <= tier_end:
            prezzo_corrente = float(riga['prezzo'])
        tier_start = tier_end
    return "" if prezzo_corrente == 0 else prezzo_corrente
@service.jsonrpc
@service.jsonrpc2
def search_piano_dei_conti(args):
    """JSON-RPC: resolve a chart-of-accounts code into its three levels.

    *args* is a code string split as group (leading part), account
    (chars 2-3) and sub-account (chars 4+). For each level, look up its
    description in anagrafica_piano_dei_conti; levels with no description
    are blanked out of the result.

    Returns [gruppo, desc_gruppo, conto, desc_conto, sottoconto, desc_sottoconto].
    """
    gruppo = args[:-5]
    conto = args[2:4]
    sottoconto = args[4:]
    gruppo_to_search = gruppo + "00000"
    conto_to_search = gruppo + conto + "000"
    sottoconto_to_search = gruppo + conto + sottoconto

    def _descrizione(codice):
        # Look up the description for one code; "" when the code is absent.
        # (Replaces the original bare ``except: pass`` which also swallowed
        # unrelated errors such as KeyboardInterrupt.)
        row = db(db.anagrafica_piano_dei_conti.codice_piano_dei_conti == codice).select().first()
        return row["descrizione_codice"] if row is not None else ""

    descrizione_gruppo = _descrizione(gruppo_to_search)
    descrizione_conto = ""
    if not conto_to_search == gruppo_to_search:
        descrizione_conto = _descrizione(conto_to_search)
    descrizione_sottoconto = ""
    if not sottoconto_to_search == gruppo_to_search and not sottoconto_to_search == conto_to_search:
        descrizione_sottoconto = _descrizione(sottoconto_to_search)
    # Blank out any level whose description could not be resolved.
    if len(descrizione_gruppo) < 1:
        gruppo_to_search = ""
    if len(descrizione_conto) < 1:
        conto_to_search = ""
    if len(descrizione_sottoconto) < 1:
        sottoconto_to_search = ""
    return [
        gruppo_to_search, descrizione_gruppo,
        conto_to_search, descrizione_conto,
        sottoconto_to_search, descrizione_sottoconto,
    ]
def ritorna_nome_cliente_da_riga_ordine(id_ordine):
    """Return the customer name on customer order *id_ordine* ("" if not found).

    Replaces the original bare ``except:`` (which masked any error) with an
    explicit check for a missing row.
    """
    row = db(db.ordine_cliente.id == id_ordine).select().first()
    return row["nome_cliente"] if row is not None else ""
def ritorna_nome_fornitore_da_riga_ordine(id_ordine):
    """Return the supplier name on supplier order *id_ordine* ("" if not found).

    Replaces the original bare ``except:`` (which masked any error) with an
    explicit check for a missing row.
    """
    row = db(db.ordine_fornitore.id == id_ordine).select().first()
    return row["nome_fornitore"] if row is not None else ""
def ritorna_ddt_da_id(ddt_id):
    """Return the customer DDT number for *ddt_id* ("" if not found).

    Replaces the original bare ``except:`` with an explicit missing-row check.
    """
    row = db(db.saved_ddt.saved_ddt_id == ddt_id).select().first()
    return row["numero_ddt"] if row is not None else ""
def ritorna_ddt_da_id_fornitori(ddt_id):
    """Return the supplier DDT number for *ddt_id* ("" if not found).

    Replaces the original bare ``except:`` with an explicit missing-row check.
    """
    row = db(db.saved_ddt_fornitori.saved_ddt_id == ddt_id).select().first()
    return row["numero_ddt"] if row is not None else ""
def storico_articoli_prodotti_cron():
    """Rebuild the storico_articoli_prodotti history table (cron task).

    Full refresh: every cached row is deleted, then one history row is
    re-inserted per non-comment saved customer-DDT line that resolves to a
    real DDT number.
    """
    db(db.storico_articoli_prodotti).delete()
    rows = db(db.saved_righe_in_ddt_cliente.codice_articolo != "commento").select()
    for row in rows:
        # Fixed the original duplicated assignment typo ("ddt=ddt=...").
        ddt = ritorna_ddt_da_id(row.saved_ddt_id)
        if len(ddt) > 0:
            db.storico_articoli_prodotti.insert(cliente=ritorna_nome_cliente_da_riga_ordine(row.id_ordine),codice_ordine=row.codice_ordine,n_riga=row.n_riga,codice_articolo=row.codice_articolo,descrizione=row.descrizione,riferimento_ordine=row.riferimento_ordine,quantita=row.quantita,prezzo=row.prezzo,codice_iva=row.codice_iva,evasione=row.evasione,ddt=ddt)
    return locals()
def storico_articoli_prodotti():
    """Read-only grid over the produced-articles history table."""
    # Hide the internal id column in the grid.
    db.storico_articoli_prodotti.id.readable = False
    articoli = SQLFORM.grid(
        db.storico_articoli_prodotti,
        formname='articoli',
        maxtextlength=100,
        create=False,
        deletable=False,
        editable=False,
        searchable=True,
        sortable=True,
        paginate=7,
        formstyle='table3cols',
        csv=False,
        user_signature=True,
    )
    return dict(articoli=articoli)
def storico_articoli_prodotti_fornitore_cron():
    """Rebuild the storico_articoli_prodotti_fornitore history table (cron task).

    Full refresh: every cached row is deleted, then one history row is
    re-inserted per saved supplier-DDT line that resolves to a real DDT number.
    """
    db(db.storico_articoli_prodotti_fornitore).delete()
    rows = db(db.saved_righe_in_ddt_fornitore).select()
    for row in rows:
        # Fixed the original duplicated assignment typo ("ddt=ddt=...").
        ddt = ritorna_ddt_da_id_fornitori(row.saved_ddt_id)
        if len(ddt) > 0:
            db.storico_articoli_prodotti_fornitore.insert(fornitore=ritorna_nome_fornitore_da_riga_ordine(row.id_ordine),codice_ordine=row.codice_ordine,n_riga=row.n_riga,codice_articolo=row.codice_articolo,descrizione=row.descrizione,riferimento_ordine=row.riferimento_ordine,quantita=row.quantita,prezzo=row.prezzo,codice_iva=row.codice_iva,evasione=row.evasione,ddt=ddt)
    return locals()
def storico_articoli_prodotti_fornitore():
    """Read-only grid over the supplier produced-articles history table."""
    # Hide the internal id column in the grid.
    db.storico_articoli_prodotti_fornitore.id.readable = False
    articoli = SQLFORM.grid(
        db.storico_articoli_prodotti_fornitore,
        formname='articoli',
        maxtextlength=100,
        create=False,
        deletable=False,
        editable=False,
        searchable=True,
        sortable=True,
        paginate=7,
        formstyle='table3cols',
        csv=False,
        user_signature=True,
    )
    return dict(articoli=articoli)
@service.jsonrpc
@service.jsonrpc2
def stampa_etichetta(*args):
    """JSON-RPC: fill a PRN label template and send it to the label printer.

    Positional args: cliente, codice_articolo, descrizione, quantita, lotto,
    numero_etichette, ordine, contenitore (args[8], destinazione, is only
    used by the currently disabled per-customer branches below).

    The quantity is split into full containers plus a final partial one;
    one label is written per full container ([*10*]/[*11*] carry the
    "label x of y" counter) and a final label carries the remainder.

    NOTE(review): the label-count bookkeeping below looks buggy (see inline
    notes) but the no-op comparison appears load-bearing — changing it would
    alter which labels are printed, so it is documented, not fixed.
    """
    cliente = args[0]
    codice_articolo = args[1]
    descrizione = args[2]
    quantita= args[3]
    lotto = args[4]
    numero_etichette = args[5]
    ordine = args[6]
    contenitore = args[7]
    # print quantita
    # print contenitore
    # Full containers and the leftover quantity for the last label.
    etichette_totali,ultima_capienza_contenitore = divmod(int(quantita),int(contenitore))
    if ultima_capienza_contenitore == 0:
        # Exact multiple: the "last" label is a full container.
        ultima_capienza_contenitore = contenitore
    etichette_da_scrivere = etichette_totali
    if etichette_totali == 1:
        # print "qui"
        # NOTE(review): this is a comparison, not an assignment — it has no
        # effect. Replacing it with ``etichette_totali = 0`` would skip the
        # loop below AND the ``== 1`` branch further down, printing nothing,
        # so the no-op seems load-bearing. Confirm intent before touching.
        etichette_totali ==0
    else:
        # More than one full container: one extra label for the remainder.
        etichette_da_scrivere = etichette_totali +1
    if True:
        """
        if cliente == "new_global":
            prn_file = request.folder + 'prn_labels/new_global.prn'
            codice_articolo = codice_articolo[1:]
            destinazione = args[8]
            ordine +=destinazione
        if cliente == "siat":
            prn_file = request.folder + 'prn_labels/siat.prn'
        if cliente == "mc":
            prn_file = request.folder + 'prn_labels/mc.prn'
        if cliente == "new_global_romania":
            prn_file = request.folder + 'prn_labels/new_global_romania.prn'
            codice_articolo = codice_articolo[1:]
            destinazione = args[8]
            ordine +=" "+destinazione
        if "cimbali" in cliente:
            prn_file = request.folder + 'prn_labels/cimbali.prn'
            destinazione = args[8]
            ordine +=destinazione
        if "rhea" in cliente:
            prn_file = request.folder + 'prn_labels/rhea.prn'
            if codice_articolo[len(codice_articolo)-1].isdigit():
                codice_articolo = "Z"+codice_articolo[:-2]
            else:
                codice_articolo = "Z" + codice_articolo[:-4] + codice_articolo[len(codice_articolo)-2:]
            destinazione = args[8]
            ordine +=destinazione
        """
        # Per-customer templates above are disabled; everyone gets mc.prn.
        prn_file = request.folder + 'prn_labels/mc.prn'
    # One label per full container: substitute the [*n*] placeholders and
    # send each filled template to the printer.
    for x in range(etichette_totali):
        _content = []
        # print "IN FOR"
        with open(prn_file, 'r') as content_file:
            content = content_file.read()
        content = content.replace("[*1*]", codice_articolo)
        content = content.replace("[*2*]", descrizione)
        content = content.replace("[*3*]", quantita)
        content = content.replace("[*5*]", ordine)
        content = content.replace("[*6*]", contenitore)
        content = content.replace("[*10*]", str(x + 1))
        content = content.replace("[*11*]", str(etichette_da_scrivere))
        content = content.replace("[*12*]", cliente)
        with open("/tmp/to#print.prn", 'w') as content_file:
            content_file.write(content)
        print_label(numero_etichette)
        # Reload the clean template for the next iteration.
        with open(prn_file, 'r') as content_file:
            content = content_file.read()
    # NOTE(review): the final (partial-container) label is only prepared
    # when etichette_totali == 1; for larger batches the remainder label is
    # never written, and the print below would re-send the last full label.
    # Looks like a bug — confirm against real label output before changing.
    if etichette_totali ==1:
        with open(prn_file, 'r') as content_file:
            content = content_file.read()
        content = content.replace("[*1*]", codice_articolo)
        content = content.replace("[*2*]", descrizione)
        content = content.replace("[*3*]", quantita)
        content = content.replace("[*5*]", ordine)
        content = content.replace("[*6*]", str(ultima_capienza_contenitore))
        content = content.replace("[*10*]", str(etichette_da_scrivere))
        content = content.replace("[*11*]", str(etichette_da_scrivere))
        content = content.replace("[*12*]", cliente)
        with open("/tmp/to#print.prn", 'w') as content_file:
            content_file.write(content)
    print etichette_totali,ultima_capienza_contenitore
    # Print the remainder label only when there actually is a partial container.
    if etichette_totali >0 and not ultima_capienza_contenitore == contenitore:
        print_label(numero_etichette)
def print_label(numero_etichette):
    """Send /tmp/to#print.prn to the label printer *numero_etichette* times.

    The printer listens on the raw JetDirect port (9100); the PRN file is
    streamed to it with netcat once per requested copy.
    """
    ip = "192.168.0.208"
    port = "9100"
    prn_file = "/tmp/to#print.prn"
    try:
        numero = int(numero_etichette)
    except (TypeError, ValueError):
        # Non-numeric/None input falls back to a single copy
        # (was a bare ``except:`` that hid every other error too).
    	numero = 1
    for x in range(numero):
        # Equivalent of ``nc ip port < prn_file`` but without shell=True
        # string interpolation: argv list + explicit stdin redirection.
        with open(prn_file, 'rb') as prn:
            p = subprocess.Popen(["nc", ip, port], stdin=prn)
            p.wait()
def user():
    """
    Standard web2py auth controller. Exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    http://..../[app]/default/user/bulk_register

    Protect other actions with @auth.requires_login(),
    @auth.requires_membership('group name') or
    @auth.requires_permission('read', 'table name', record_id).
    Users can also be managed at http://..../[app]/appadmin/manage/auth.
    """
    return dict(form=auth())
@cache.action()
def download():
    """
    Serve previously uploaded files:
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)
def call():
    """
    RPC entry point, e.g.:
    http://..../[app]/default/call/jsonrpc
    Functions decorated with @service.jsonrpc (etc.) are exposed here;
    xml, json, xmlrpc, jsonrpc, amfrpc, rss and csv are supported.
    """
    return service()
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_xml(args):
    """JSON-RPC: build an Italian electronic invoice (FatturaPA XML) from
    the DDTs queued in ddt_da_fatturare for the current user.

    args['0'] is the customer id, args['1'] the document/payment type.
    Returns a JSON string: {'error': True, 'msg': ...} on failure, or
    {'msg': ..., 'filename': <written XML file>} on success.

    NOTE(review): heavily order-dependent; several spots flagged inline
    look wrong but are left untouched pending confirmation.
    """
    data={}
    fattura=None
    fattura=FatturaXml()
    articoli=set([])
    # Fixed issuer (cedente/prestatore) identity data.
    partitaIvaCarpal="01619570193"
    codiceFiscaleCarpal="01619570193"
    denominazioneCarpal="MICROCARP S.R.L."
    indirizzoCarpal="Strada Statale 415"
    capCarpal="26012"
    provinciaCarpal="CR"
    paeseCarpal="Castelleone"
    # Next invoice number ("progressivo invio"): stored as "numero/anno".
    numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
    numero = int(numero_corrente_fattura.split("/")[0])
    anno = int(numero_corrente_fattura.split("/")[1])
    numero +=1
    numero_fattura_da_salvare = str(numero)+"/"+str(anno)
    numeroDocumento=str(numero)
    progressivoInvio=numero_fattura_da_salvare
    """
    Dati cliente
    """
    # Customer (cessionario/committente) master data.
    id_cliente=args['0']
    dati_cliente = db(db.clienti.id == id_cliente).select().first()
    nome_cliente=dati_cliente.nome
    citta_cliente = dati_cliente.citta
    indirizzo_cliente = dati_cliente.indirizzo
    cap_cliente = dati_cliente.cap
    provincia_cliente = dati_cliente.provincia
    cf_cliente = dati_cliente.codice_fiscale
    pi_cliente = dati_cliente.partita_iva
    nazione_cliente = dati_cliente.nazione
    codice_banca = dati_cliente.codice_banca
    iban_cliente = dati_cliente.codice_iban
    dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
    scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
    annotazioni=dati_cliente.annotazioni
    codiceDestinatario=dati_cliente.codiceDestinatario
    pecDestinatario=dati_cliente.pec
    dichiarazione=dati_cliente.descrizione_esenzione_iva
    bollo_interno=dati_cliente.bollo
    # VAT collectability: "I" immediate, "S" split payment (public bodies).
    esigibilitaIva="I"
    if "leonardo" in nome_cliente.lower():
        esigibilitaIva="S"
    try:
        # First queued DDT: used for the DDT reference and, for public
        # bodies, the purchase-order / recipient-code overrides.
        ddt = db(db.ddt_da_fatturare.user_id == auth.user_id).select().first()
        ddt_id=ddt.ddt_id
        print "Dettaglio ddt",ddt
        numero_ddt=ddt.numero_ddt
        data_emissione_ddt=ddt.data_emissione
        print data_emissione_ddt
        data_emissione_ddt=datetime.datetime.strptime(data_emissione_ddt,"%d/%m/%Y")
        fattura.addSingleDdt(numero_ddt,data_emissione_ddt.strftime("%Y-%m-%d"))
        righe=db(db.saved_righe_in_ddt_cliente.saved_ddt_id==ddt_id).select().first()
        id_ordine=righe.id_ordine
        dati_ordine=db(db.ordine_cliente.id==id_ordine).select().first()
        print dati_ordine
        ente=dati_ordine.ente
        idOrdineAcquisto=dati_ordine.riferimento_ordine_cliente
        cig=dati_ordine.cig
        cup=dati_ordine.cup
        if cig is not None or cup is not None:
            fattura.addOrdineAcquisto(idOrdineAcquisto,cig,cup)
        print "Trovata ente : "+ente
        # Hard-coded recipient codes for known public-administration bodies.
        if "ETN" in ente:
            codiceDestinatario="DL33NSJ"
        if "SAS" in ente:
            codiceDestinatario="OXPJRM5"
        if "SSI" in ente:
            codiceDestinatario="RUZUQNZ"
    except:
        # NOTE(review): bare except — any failure in the whole block above
        # (including ente being None) aborts with this generic message.
        data['msg']="Impossibile recuperare ente per "+str(nome_cliente)
        data['error']=True
        return json.dumps(data)
    if bollo_interno:
        fattura.addBollo()
    if dichiarazione is not None:
        if len(dichiarazione)>0:
            fattura.addDichiarazione(dichiarazione)
    # Validation: a recipient code or PEC, and a VAT number, are mandatory.
    if codiceDestinatario is None and pecDestinatario is None:
        data['msg']="Inserire codice destinatario o pec per il cliente "+str(nome_cliente)
        data['error']=True
        return json.dumps(data)
    if len(codiceDestinatario)<5 and len(pecDestinatario)<5:
        data['msg']="Inserire codice destinatario o pec per il cliente "+str(nome_cliente)
        data['error']=True
        return json.dumps(data)
    if pi_cliente is None:
        data['msg']="Inserire la partita iva per il cliente "+str(nome_cliente)
        data['error']=True
        return json.dumps(data)
    # Header sections of the FatturaPA document.
    fattura.addDatiTrasmissione("IT",codiceFiscaleCarpal,progressivoInvio,codiceDestinatario,pecDestinatario)
    fattura.addCedentePrestatore("IT",partitaIvaCarpal,denominazioneCarpal)
    fattura.addSedeCedentePrestatore(indirizzoCarpal,capCarpal,paeseCarpal,provinciaCarpal,"IT")
    # Customer data
    fattura.addCessionarioCommittente("IT",pi_cliente.replace("IT",""),nome_cliente)
    fattura.addSedeCessionarioCommittente(indirizzo_cliente,cap_cliente,citta_cliente,provincia_cliente,"IT")
    tipoDocumento=ritornaTipoDiPagamento(args['1'])
    # Invoice date: end of the month of the queued DDT's emission date.
    # NOTE(review): only data_scelta from the LAST row of this loop is
    # used below — presumably all queued DDTs share the month; confirm.
    ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
    for r in ddts_id:
        data_scelta = r.data_emissione
    m = datetime.datetime.strptime(data_scelta,"%d/%m/%Y").date()
    day_start,day_end = monthrange(m.year, m.month)
    d = str(day_end)+"/"+str(m.month)+"/"+str(m.year)
    start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
    # Invoice description: one "Rif. DDT" fragment per queued DDT.
    descrizione_fattura=""
    ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
    for ddt_id in ddts_id:
        descrizione_fattura += "Rif. DDT : " + ddt_id.numero_ddt + " del " + ddt_id.data_emissione+" "
    fattura.addDatiGeneraliDocumento(tipoDocumento,fixDate(start_date.strftime("%d-%m-%Y")),numeroDocumento,descrizione_fattura)
    # Check whether multiple payment installments are possible
    """
    if len(righeDataScadenza)>1:
        pagamento="TP01"
    else:
        pagamento="TP02"
    """
    # For now always a single installment
    pagamento="TP02"
    fattura.addCondizioniPagamento(pagamento)
    # Collect one "articolo" record (13-element list, indexed positionally
    # downstream) per distinct non-comment DDT line.
    articoli=[]
    for ddt_id in ddts_id:
        rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id.ddt_id).select()
        for row in rows:
            if not "commento" in row.codice_articolo:
                articolo=[]
                id_ordine = row.id_ordine
                try:
                    # Payment terms: order-level override, else customer default.
                    try:
                        pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
                    except:
                        pagamento = None
                    if pagamento is None:
                        pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
                    # "F.M." = fine mese (end-of-month) payment terms.
                    if "F.M." in pagamento:
                        fine_mese = True
                    else:
                        fine_mese = False
                    if not fine_mese:
                        try:
                            giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                            # Dec/Jan/Feb invoices: shave a few days off the
                            # standard 60/90/120 terms.
                            if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
                                if int(giorni_da_aggiungere)==60:
                                    giorni_da_aggiungere="56"
                                if int(giorni_da_aggiungere)==90:
                                    giorni_da_aggiungere="86"
                                if int(giorni_da_aggiungere)==120:
                                    giorni_da_aggiungere="116"
                            # NOTE(review): deadline computed from "now",
                            # not from start_date as in the F.M. branches.
                            scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                            scadenza_salvata = scadenza
                            scadenza = scadenza.strftime("%d/%m/%Y")
                        except:
                            response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
                            return locals()
                    else:
                        # NOTE(review): ("M.S." or "ms") always evaluates to
                        # "M.S.", so the "ms" alternative is never checked.
                        # Suspected bug; left as-is to preserve behavior.
                        if ("M.S." or "ms") in pagamento:
                            # End of month + extra days into the next month.
                            giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                            if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
                                if int(giorni_da_aggiungere)==60:
                                    giorni_da_aggiungere="56"
                                if int(giorni_da_aggiungere)==90:
                                    giorni_da_aggiungere="86"
                                if int(giorni_da_aggiungere)==120:
                                    giorni_da_aggiungere="116"
                            giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
                            scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                            day_start,day_end = monthrange(scadenza.year, scadenza.month)
                            scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                            scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
                            scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
                            scadenza = scadenza.strftime("%d/%m/%Y")
                        else:
                            # End of month without M.S.
                            giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                            if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
                                if int(giorni_da_aggiungere)==60:
                                    giorni_da_aggiungere="56"
                                if int(giorni_da_aggiungere)==90:
                                    giorni_da_aggiungere="86"
                                if int(giorni_da_aggiungere)==120:
                                    giorni_da_aggiungere="116"
                            scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                            day_start,day_end = monthrange(scadenza.year, scadenza.month)
                            scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                    # fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(iban_cliente),pagamento,str(scadenza))
                    # Line data + VAT lookup for this DDT row.
                    codice_articolo=row.codice_articolo
                    descrizione=row.descrizione
                    um=row.u_m
                    qta=row.quantita
                    codice_iva=row.codice_iva
                    riferimento_ordine=row.riferimento_ordine
                    prezzo=row.prezzo
                    n_riga=str(row.n_riga)
                    descrizione+=" Pos. "+n_riga
                    percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["percentuale_iva"]
                    codice_iva_interno=db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["codice_iva"]
                    bollo = db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["bollo_su_importi_esenti"]
                    # Positional layout consumed downstream:
                    # 0 code, 1 description, 2 VAT label, 3 VAT %, 4 bollo,
                    # 5 UoM, 6 qty, 7 price, 8 order ref, 9 payment terms,
                    # 10 due date, 11 payment conditions, 12 internal VAT code.
                    articolo.append(codice_articolo)
                    articolo.append(descrizione)
                    articolo.append(codice_iva)
                    articolo.append(percentuale_iva)
                    articolo.append(bollo)
                    articolo.append(um)
                    articolo.append(qta)
                    articolo.append(controllaPrezzo(prezzo))
                    articolo.append(riferimento_ordine)
                    articolo.append(pagamento)
                    articolo.append(scadenza)
                    articolo.append(ritornaCondizioniPagamento(pagamento))
                    articolo.append(codice_iva_interno)
                    # De-duplicate identical lines.
                    add=True
                    for a in articoli:
                        # print a,articolo,a==articolo
                        if a==articolo:
                            add=False
                            break
                    if add:
                        articoli.append(articolo)
                    articolo=[]
                except Exception,e:
                    data['msg']="Controllare tipo pagamento per cliente "+str(nome_cliente)+str(e)
                    data['error']=True
                    return json.dumps(data)
    if bollo_interno:
        # Extra fixed line for the 2.00 EUR virtual stamp duty.
        articolo=[]
        codice_iva_interno="54"
        articolo.append("")
        articolo.append("Imposta di bollo assolta in modo virtuale ex DM 17/06/2014")
        articolo.append("Esente Iva")
        articolo.append(0.00)
        articolo.append("")
        articolo.append("Nr")
        articolo.append("1")
        articolo.append("2.00")
        articolo.append("")
        articolo.append(pagamento)
        articolo.append(scadenza)
        articolo.append(ritornaCondizioniPagamento(pagamento))
        articolo.append(codice_iva_interno)
        articoli.append(articolo)
    def ritornaImponibile(qta,prezzo):
        # Taxable amount for one line, rounded to 2 decimals.
        imponibile= float(qta)*float(prezzo)
        print imponibile,float("%0.2f"%imponibile)
        return float("%0.2f"%imponibile)
    def ritornaTotaleArticoli(articoli):
        # Grand total (VAT included) over all lines, as a "x.xx" string.
        totale=0.0
        for articolo in articoli:
            imponibile=ritornaImponibile(articolo[6],articolo[7])
            percentualeIva=articolo[3]
            totaleIvaInclusa=imponibile + (imponibile*percentualeIva)/100
            totale+=totaleIvaInclusa
        return str("{:.2f}".format(totale))
    # Payment detail: due date taken from the first line, zero-padded and
    # reformatted dd/mm/yyyy -> yyyy-mm-dd.
    # NOTE(review): the day-padding line writes d[1] instead of d[0] —
    # suspected copy/paste bug for days 1-9; confirm before fixing.
    articolo=articoli[0]
    dataToFix=articolo[10]
    d=dataToFix.split("/")
    if len(d[1])==1:
        d[1]="0"+d[1]
    if len(d[0])==1:
        d[0]="0"+d[1]
    d=d[2]+"-"+d[1]+"-"+d[0]
    fattura.addDettaglioPagamento(articolo[11],d,ritornaTotaleArticoli(articoli))
    print "Totale iva inclusa : ",ritornaTotaleArticoli(articoli)
    # Per-VAT-code taxable totals (riepilogo section).
    db(db.anagrafica_codici_iva).select()
    TotaleRigheCodiciIva={}
    for articolo in articoli:
        percentuale_iva=articolo[3]
        codice_iva_interno=articolo[12]
        imponibile=ritornaImponibile(articolo[6],articolo[7])
        if not TotaleRigheCodiciIva.has_key(codice_iva_interno):
            TotaleRigheCodiciIva[codice_iva_interno] = imponibile
        else:
            TotaleRigheCodiciIva[codice_iva_interno] = TotaleRigheCodiciIva[codice_iva_interno] + imponibile
    print TotaleRigheCodiciIva
    # One riepilogo entry per internal VAT code (22%, 10%, exempt 53/54).
    for k in TotaleRigheCodiciIva:
        print "ALIQUOTA IVA : ",k
        aliquota_iva = db(db.anagrafica_codici_iva.codice_iva == k).select().first()["percentuale_iva"]
        imponibile=TotaleRigheCodiciIva[k]
        if k=="22":
            aliquota_iva="22.00"
            descrizione_imposta=""
            if esigibilitaIva=="S":
                descrizione_imposta=scritta_esenzione_cliente
            imposta=(imponibile*22.0)/100
            fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),esigibilitaIva,descrizione_imposta,k)
        if k=="10":
            aliquota_iva="10.00"
            descrizione_imposta=""
            imposta=(imponibile*10.0)/100
            fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),esigibilitaIva,descrizione_imposta,k)
        if k=="53":
            aliquota_iva="0.00"
            descrizione_imposta=db(db.anagrafica_codici_iva.codice_iva == k).select().first()["descrizione"]
            imposta=(imponibile*0)/100
            fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),esigibilitaIva,descrizione_imposta,k)
            print "sono qui"
        if k=="54":
            aliquota_iva="0.00"
            descrizione_imposta=db(db.anagrafica_codici_iva.codice_iva == k).select().first()["descrizione"]
            imposta=(imponibile*0)/100
            fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),"I",descrizione_imposta,k)
    # Detail lines (DettaglioLinee) in order.
    numero_linea=1
    for articolo in articoli:
        if "22" in articolo[12]:
            aliquota="22.00"
        elif "10" in articolo[12]:
            aliquota="10.00"
        else:
            aliquota="0.00"
        descrizione=articolo[0]+" "+articolo[1]+" Ord. "+articolo[8] #order reference
        qta=fixPrezzo(articolo[6])+".00"
        prezzo=str(articolo[7])
        codice_iva=str(articolo[12])
        importo=str("{:.2f}".format(ritornaImponibile(qta,prezzo)))
        fattura.addLinea(str(numero_linea),descrizione,qta,prezzo,importo,aliquota,codice_iva)
        numero_linea+=1
    nome_file=fattura.writeXml()
    # cwd = os.getcwd()+"/applications/gestionale/uploads/fatture/"
    # id_cliente=args['0']
    # tipo_fattura=args['1']
    # data['error']=None
    data['msg']="Tutapost"
    data['filename']=nome_file
    return json.dumps(data)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_xml_istantanea(args):
data={}
fattura=None
fattura=FatturaXml()
articoli=set([])
partitaIvaCarpal="01619570193"
codiceFiscaleCarpal="01619570193"
denominazioneCarpal="MICROCARP S.R.L."
indirizzoCarpal="Strada Statale 415"
capCarpal="26012"
provinciaCarpal="CR"
paeseCarpal="Castelleone"
# Progressivo Invio
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
numeroDocumento=str(numero)
progressivoInvio=numero_fattura_da_salvare
"""
Dati cliente
"""
id_cliente=args['0']
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
iban_cliente = dati_cliente.codice_iban
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
annotazioni=dati_cliente.annotazioni
codiceDestinatario=dati_cliente.codiceDestinatario
pecDestinatario=dati_cliente.pec
bollo_interno=dati_cliente.bollo
dichiarazione=dati_cliente.descrizione_esenzione_iva
if dichiarazione is not None:
if len(dichiarazione)>0:
fattura.addDichiarazione(dichiarazione)
if codiceDestinatario is None and pecDestinatario is None:
data['msg']="Inserire codice destinatario o pec per il cliente "+str(nome_cliente)
data['error']=True
return json.dumps(data)
if pi_cliente is None:
data['msg']="Inserire la partita iva per il cliente "+str(nome_cliente)
data['error']=True
return json.dumps(data)
if len(codiceDestinatario)<5 and len(pecDestinatario)<5:
data['msg']="Inserire codice destinatario o pec per il cliente "+str(nome_cliente)
data['error']=True
return json.dumps(data)
if bollo_interno:
fattura.addBollo()
fattura.addDatiTrasmissione("IT",codiceFiscaleCarpal,progressivoInvio,codiceDestinatario,pecDestinatario)
fattura.addCedentePrestatore("IT",partitaIvaCarpal,denominazioneCarpal)
fattura.addSedeCedentePrestatore(indirizzoCarpal,capCarpal,paeseCarpal,provinciaCarpal,"IT")
# Dati cliente
fattura.addCessionarioCommittente("IT",pi_cliente.replace("IT",""),nome_cliente)
fattura.addSedeCessionarioCommittente(indirizzo_cliente,cap_cliente,citta_cliente,provincia_cliente,"IT")
tipoDocumento=ritornaTipoDiPagamento(args['1'])
# Calcolo data fattura
"""
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
for r in ddts_id:
data_scelta = r.data_emissione
m = datetime.datetime.strptime(data_scelta,"%d/%m/%Y").date()
day_start,day_end = monthrange(m.year, m.month)
d = str(day_end)+"/"+str(m.month)+"/"+str(m.year)
start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
"""
start_date = datetime.datetime.now()
# Creazione descrizione fattura
descrizione_fattura="Fattura Immediata"
fattura.addDatiGeneraliDocumento(tipoDocumento,fixDate(start_date.strftime("%d-%m-%Y")),numeroDocumento,descrizione_fattura)
# Controllare se ci possono essere più rate di pagamento
"""
if len(righeDataScadenza)>1:
pagamento="TP01"
else:
pagamento="TP02"
"""
# Per ora metto sempre solo 1 rata
pagamento="TP02"
fattura.addCondizioniPagamento(pagamento)
articoli=[]
articolo=[]
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
print "Pagamento :",pagamento
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
print "Scadenza : ",scadenza
print "qui prima articolo"
print row
codice_articolo=row.codice_articolo
descrizione=row.descrizione
um=row.u_m
qta=row.qta
codice_iva=row.codice_iva
print "Codice iva",codice_iva
riferimento_ordine=row.riferimento_ordine
prezzo=row.prezzo
print "qui dopo articolo"
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["percentuale_iva"]
codice_iva_interno=db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["codice_iva"]
bollo = db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["bollo_su_importi_esenti"]
articolo.append(codice_articolo)
articolo.append(descrizione)
articolo.append(codice_iva)
articolo.append(percentuale_iva)
articolo.append(bollo)
articolo.append(um)
articolo.append(qta)
articolo.append(controllaPrezzo(prezzo))
articolo.append(riferimento_ordine)
articolo.append(pagamento)
articolo.append(scadenza)
articolo.append(ritornaCondizioniPagamento(pagamento))
articolo.append(codice_iva_interno)
add=True
for a in articoli:
print a,articolo,a==articolo
if a==articolo:
add=False
break
if add:
articoli.append(articolo)
articolo=[]
except Exception,e:
data['msg']="Controllare tipo pagamento per cliente "+str(nome_cliente)+str(e)
data['error']=True
return json.dumps(data)
if bollo_interno:
articolo=[]
codice_iva_interno="54"
articolo.append("")
articolo.append("Imposta di bollo assolta in modo virtuale ex DM 17/06/2014")
articolo.append("Esente Iva")
articolo.append(0.00)
articolo.append("")
articolo.append("Nr")
articolo.append("1")
articolo.append("2.00")
articolo.append("")
articolo.append(pagamento)
articolo.append(scadenza)
articolo.append(ritornaCondizioniPagamento(pagamento))
articolo.append(codice_iva_interno)
articoli.append(articolo)
print articoli
def ritornaImponibile(qta,prezzo):
imponibile= float(qta)*float(prezzo)
return float("%0.2f"%imponibile)
def ritornaTotaleArticoli(articoli):
    """Return the VAT-inclusive grand total of all line items as a "%.2f" string.

    Each item is the positional list built above: index 3 is the VAT
    percentage, index 6 the quantity, index 7 the unit price.
    """
    totale = 0.0
    for riga in articoli:
        imponibile = ritornaImponibile(riga[6], riga[7])
        aliquota = riga[3]
        # Add the VAT share on top of the taxable amount.
        totale += imponibile + (imponibile * aliquota) / 100
    return "{:.2f}".format(totale)
# Dettagilio Pagamento
# Convert the due date of the first line item from D/M/YYYY to ISO YYYY-MM-DD
# for the payment-detail section of the XML invoice.
articolo=articoli[0]
dataToFix=articolo[10]  # index 10 = scadenza (due date), e.g. "31/7/2019"
d=dataToFix.split("/")
if len(d[1])==1:
    d[1]="0"+d[1]  # zero-pad the month
if len(d[0])==1:
    d[0]="0"+d[0]  # zero-pad the day (was "0"+d[1]: copy-paste bug that wrote the month into the day)
d=d[2]+"-"+d[1]+"-"+d[0]
fattura.addDettaglioPagamento(articolo[11],d,ritornaTotaleArticoli(articoli))
print articolo[11],d,ritornaTotaleArticoli(articoli)
print "Totale iva inclusa : ",ritornaTotaleArticoli(articoli)
# TotalerigheCodiciIva
db(db.anagrafica_codici_iva).select()
TotaleRigheCodiciIva={}
for articolo in articoli:
print articolo
percentuale_iva=articolo[3]
codice_iva_interno=articolo[12]
imponibile=ritornaImponibile(articolo[6],articolo[7])
if not TotaleRigheCodiciIva.has_key(codice_iva_interno):
TotaleRigheCodiciIva[codice_iva_interno] = imponibile
else:
TotaleRigheCodiciIva[codice_iva_interno] = TotaleRigheCodiciIva[codice_iva_interno] + imponibile
for k in TotaleRigheCodiciIva:
aliquota_iva = db(db.anagrafica_codici_iva.codice_iva == k).select().first()["percentuale_iva"]
imponibile=TotaleRigheCodiciIva[k]
if k=="22":
aliquota_iva="22.00"
descrizione_imposta=""
imposta=(imponibile*22.0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),"I",descrizione_imposta,k)
if k=="10":
aliquota_iva="10.00"
descrizione_imposta=""
imposta=(imponibile*10.0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),"I",descrizione_imposta,k)
if k=="53":
aliquota_iva="0.00"
descrizione_imposta=db(db.anagrafica_codici_iva.codice_iva == k).select().first()["descrizione"]
imposta=(imponibile*0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),"I",descrizione_imposta,k)
if k=="54":
aliquota_iva="0.00"
descrizione_imposta=db(db.anagrafica_codici_iva.codice_iva == k).select().first()["descrizione"]
imposta=(imponibile*0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),"I",descrizione_imposta,k)
numero_linea=1
for articolo in articoli:
if "22" in articolo[12]:
aliquota="22.00"
elif "10" in articolo[12]:
aliquota="10.00"
else:
aliquota="0.00"
descrizione=articolo[0]+" "+articolo[1]+" "+articolo[8] #riferimento ordine
qta=fixPrezzo(articolo[6])+".00"
prezzo=str(articolo[7])
codice_iva=str(articolo[12])
importo=str("{:.2f}".format(ritornaImponibile(qta,prezzo)))
fattura.addLinea(str(numero_linea),descrizione,qta,prezzo,importo,aliquota,codice_iva)
numero_linea+=1
nome_file=fattura.writeXml()
# cwd = os.getcwd()+"/applications/gestionale/uploads/fatture/"
# id_cliente=args['0']
# tipo_fattura=args['1']
# data['error']=None
data['msg']="Tutapost"
data['filename']=nome_file
return json.dumps(data)
def controllaPrezzo(prezzo):
    """Return *prezzo* as a string, appending ".00" when it has no decimal part."""
    testo = str(prezzo)
    return testo if "." in testo else testo + ".00"
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_xml_accredito(args):
data={}
fattura=None
fattura=FatturaXml()
articoli=set([])
partitaIvaCarpal="01619570193"
codiceFiscaleCarpal="01619570193"
denominazioneCarpal="MICROCARP S.R.L."
indirizzoCarpal="Strada Statale 415"
capCarpal="26012"
provinciaCarpal="CR"
paeseCarpal="Castelleone"
# Progressivo Invio
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
numeroDocumento=str(numero)
progressivoInvio=numero_fattura_da_salvare
"""
Dati cliente
"""
id_cliente=args['0']
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
iban_cliente = dati_cliente.codice_iban
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
annotazioni=dati_cliente.annotazioni
codiceDestinatario=dati_cliente.codiceDestinatario
pecDestinatario=dati_cliente.pec
bollo_interno=dati_cliente.bollo
dichiarazione=dati_cliente.descrizione_esenzione_iva
if dichiarazione is not None:
if len(dichiarazione)>0:
fattura.addDichiarazione(dichiarazione)
"""
arguments['1']='accredito'
arguments['2']=ente
arguments['3'] =causale
arguments['4'] = riferimento_ordine
arguments['5'] = cig
arguments['6'] = cup
"""
causale=args['3']
riferimento_ordine=args['4']
cig=args['5']
cup=args['6']
fattura.addOrdineAcquisto(riferimento_ordine,cig,cup)
esigibilitaIva="I"
if "leonardo" in nome_cliente.lower():
esigibilitaIva="S"
try:
ente=args['2']
print "Trovata ente : "+ente
if "ETN" in ente:
codiceDestinatario="DL33NSJ"
if "SAS" in ente:
codiceDestinatario="OXPJRM5"
if "SSI" in ente:
codiceDestinatario="RUZUQNZ"
except:
data['msg']="Impossibile recuperare ente per "+str(nome_cliente)
data['error']=True
return json.dumps(data)
if codiceDestinatario is None and pecDestinatario is None:
data['msg']="Inserire codice destinatario o pec per il cliente "+str(nome_cliente)
data['error']=True
return json.dumps(data)
if pi_cliente is None:
data['msg']="Inserire la partita iva per il cliente "+str(nome_cliente)
data['error']=True
return json.dumps(data)
if len(codiceDestinatario)<5 and len(pecDestinatario)<5:
data['msg']="Inserire codice destinatario o pec per il cliente "+str(nome_cliente)
data['error']=True
return json.dumps(data)
if bollo_interno:
fattura.addBollo()
fattura.addDatiTrasmissione("IT",codiceFiscaleCarpal,progressivoInvio,codiceDestinatario,pecDestinatario)
fattura.addCedentePrestatore("IT",partitaIvaCarpal,denominazioneCarpal)
fattura.addSedeCedentePrestatore(indirizzoCarpal,capCarpal,paeseCarpal,provinciaCarpal,"IT")
# Dati cliente
fattura.addCessionarioCommittente("IT",pi_cliente.replace("IT",""),nome_cliente)
fattura.addSedeCessionarioCommittente(indirizzo_cliente,cap_cliente,citta_cliente,provincia_cliente,"IT")
tipoDocumento=ritornaTipoDiPagamento(args['1'])
# Calcolo data fattura
"""
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
for r in ddts_id:
data_scelta = r.data_emissione
m = datetime.datetime.strptime(data_scelta,"%d/%m/%Y").date()
day_start,day_end = monthrange(m.year, m.month)
d = str(day_end)+"/"+str(m.month)+"/"+str(m.year)
start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
"""
start_date = datetime.datetime.now()
# Creazione descrizione fattura
descrizione_fattura=causale
fattura.addDatiGeneraliDocumento(tipoDocumento,fixDate(start_date.strftime("%d-%m-%Y")),numeroDocumento,descrizione_fattura)
# Controllare se ci possono essere più rate di pagamento
"""
if len(righeDataScadenza)>1:
pagamento="TP01"
else:
pagamento="TP02"
"""
# Per ora metto sempre solo 1 rata
pagamento="TP02"
fattura.addCondizioniPagamento(pagamento)
articoli=[]
articolo=[]
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
print "Pagamento :",pagamento
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
print "Scadenza : ",scadenza
print "qui prima articolo"
print row
codice_articolo=row.codice_articolo
descrizione=row.descrizione
um=row.u_m
qta=row.qta
codice_iva=row.codice_iva
riferimento_ordine=row.riferimento_ordine
prezzo=row.prezzo
print "qui dopo articolo"
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["percentuale_iva"]
codice_iva_interno=db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["codice_iva"]
bollo = db(db.anagrafica_codici_iva.descrizione_codice_iva == codice_iva).select().first()["bollo_su_importi_esenti"]
articolo.append(codice_articolo)
articolo.append(descrizione)
articolo.append(codice_iva)
articolo.append(percentuale_iva)
articolo.append(bollo)
articolo.append(um)
articolo.append(qta)
articolo.append(controllaPrezzo(prezzo))
articolo.append(riferimento_ordine)
articolo.append(pagamento)
articolo.append(scadenza)
articolo.append(ritornaCondizioniPagamento(pagamento))
articolo.append(codice_iva_interno)
add=True
for a in articoli:
print a,articolo,a==articolo
if a==articolo:
add=False
break
if add:
if "commento" not in articolo[0]:
articoli.append(articolo)
articolo=[]
except Exception,e:
data['msg']="Controllare tipo pagamento per cliente "+str(nome_cliente)+str(e)
data['error']=True
return json.dumps(data)
if bollo_interno:
articolo=[]
codice_iva_interno="54"
articolo.append("")
articolo.append("Imposta di bollo assolta in modo virtuale ex DM 17/06/2014")
articolo.append("Esente Iva")
articolo.append(0.00)
articolo.append("")
articolo.append("Nr")
articolo.append("1")
articolo.append("2.00")
articolo.append("")
articolo.append(pagamento)
articolo.append(scadenza)
articolo.append(ritornaCondizioniPagamento(pagamento))
articolo.append(codice_iva_interno)
articoli.append(articolo)
def ritornaImponibile(qta, prezzo):
    """Return qta * prezzo rounded to 2 decimals, or 0 when either value
    cannot be parsed as a number.

    Inputs typically come from DB rows as strings; a malformed row must not
    abort invoice generation, hence the 0 fallback.
    """
    try:
        return round(float(qta) * float(prezzo), 2)
    except (TypeError, ValueError):
        # Non-numeric quantity or price: value the line at 0 rather than
        # failing the whole invoice (was a bare `except:` plus a debug print).
        return 0
def ritornaTotaleArticoli(articoli):
    """Return the VAT-inclusive total of all line items, formatted "%.2f".

    Rows that raise (e.g. a non-numeric VAT percentage at index 3) are
    skipped best-effort, so one bad row does not abort invoice generation.
    """
    totale = 0.0
    for articolo in articoli:
        try:
            # index 6 = quantity, index 7 = unit price, index 3 = VAT %.
            imponibile = ritornaImponibile(articolo[6], articolo[7])
            totale += imponibile + (imponibile * articolo[3]) / 100
        except Exception as e:  # was Python-2-only `except Exception,e`
            print(e)  # report and keep processing the remaining rows
    return "{:.2f}".format(totale)
# Dettagilio Pagamento
# Convert the due date of the first line item from D/M/YYYY to ISO YYYY-MM-DD
# for the payment-detail section of the XML invoice.
articolo=articoli[0]
dataToFix=articolo[10]  # index 10 = scadenza (due date), e.g. "31/7/2019"
d=dataToFix.split("/")
if len(d[1])==1:
    d[1]="0"+d[1]  # zero-pad the month
if len(d[0])==1:
    d[0]="0"+d[0]  # zero-pad the day (was "0"+d[1]: copy-paste bug that wrote the month into the day)
d=d[2]+"-"+d[1]+"-"+d[0]
fattura.addDettaglioPagamento(articolo[11],d,ritornaTotaleArticoli(articoli))
print "Totale iva inclusa : ",ritornaTotaleArticoli(articoli)
# TotalerigheCodiciIva
db(db.anagrafica_codici_iva).select()
TotaleRigheCodiciIva={}
for articolo in articoli:
percentuale_iva=articolo[3]
codice_iva_interno=articolo[12]
imponibile=ritornaImponibile(articolo[6],articolo[7])
if not TotaleRigheCodiciIva.has_key(codice_iva_interno):
TotaleRigheCodiciIva[codice_iva_interno] = imponibile
else:
TotaleRigheCodiciIva[codice_iva_interno] = TotaleRigheCodiciIva[codice_iva_interno] + imponibile
for k in TotaleRigheCodiciIva:
aliquota_iva = db(db.anagrafica_codici_iva.codice_iva == k).select().first()["percentuale_iva"]
imponibile=TotaleRigheCodiciIva[k]
if k=="22":
aliquota_iva="22.00"
descrizione_imposta=""
if esigibilitaIva=="S":
descrizione_imposta=scritta_esenzione_cliente
imposta=(imponibile*22.0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)), esigibilitaIva,descrizione_imposta,k)
if k=="10":
aliquota_iva="10.00"
descrizione_imposta=""
imposta=(imponibile*10.0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),esigibilitaIva,descrizione_imposta,k)
if k=="53":
aliquota_iva="0.00"
descrizione_imposta=db(db.anagrafica_codici_iva.codice_iva == k).select().first()["descrizione"]
imposta=(imponibile*0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),esigibilitaIva,descrizione_imposta,k)
if k=="54":
aliquota_iva="0.00"
descrizione_imposta=db(db.anagrafica_codici_iva.codice_iva == k).select().first()["descrizione"]
imposta=(imponibile*0)/100
fattura.addDatiRiepilogo(aliquota_iva,str("{:.2f}".format(imponibile)),str("{:.2f}".format(imposta)),"I",descrizione_imposta,k)
numero_linea=1
for articolo in articoli:
if "22" in articolo[12]:
aliquota="22.00"
elif "10" in articolo[12]:
aliquota="10.00"
else:
aliquota="0.00"
descrizione=articolo[0]+" "+articolo[1]+" "+articolo[8] #riferimento ordine
qta=fixPrezzo(articolo[6])+".00"
prezzo=str(articolo[7])
codice_iva=str(articolo[12])
importo=str("{:.2f}".format(ritornaImponibile(qta,prezzo)))
fattura.addLinea(str(numero_linea),descrizione,qta,prezzo,importo,aliquota,codice_iva)
numero_linea+=1
nome_file=fattura.writeXml()
# cwd = os.getcwd()+"/applications/gestionale/uploads/fatture/"
# id_cliente=args['0']
# tipo_fattura=args['1']
# data['error']=None
data['msg']="Tutapost"
data['filename']=nome_file
return json.dumps(data)
| 40.013103
| 517
| 0.578945
| 34,179
| 314,543
| 5.073232
| 0.031715
| 0.010634
| 0.008253
| 0.011303
| 0.869797
| 0.841844
| 0.81787
| 0.803377
| 0.78261
| 0.770568
| 0
| 0.011871
| 0.313305
| 314,543
| 7,860
| 518
| 40.018193
| 0.790916
| 0.074193
| 0
| 0.779916
| 0
| 0
| 0.060625
| 0.005273
| 0.000442
| 0
| 0
| 0
| 0
| 0
| null | null | 0.00929
| 0.046671
| null | null | 0.012165
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5275044028f587d66417f5ac8aeafec69e3ceef8
| 4,160
|
py
|
Python
|
rdmo/projects/tests/test_view_project_join.py
|
m6121/rdmo
|
db3990c7525138c6ce9634fc3e5b6b8ee9b915c8
|
[
"Apache-2.0"
] | 77
|
2016-08-09T11:40:20.000Z
|
2022-03-06T11:03:26.000Z
|
rdmo/projects/tests/test_view_project_join.py
|
m6121/rdmo
|
db3990c7525138c6ce9634fc3e5b6b8ee9b915c8
|
[
"Apache-2.0"
] | 377
|
2016-07-01T13:59:36.000Z
|
2022-03-30T13:53:19.000Z
|
rdmo/projects/tests/test_view_project_join.py
|
m6121/rdmo
|
db3990c7525138c6ce9634fc3e5b6b8ee9b915c8
|
[
"Apache-2.0"
] | 47
|
2016-06-23T11:32:19.000Z
|
2022-03-01T11:34:37.000Z
|
import pytest
from django.contrib.auth import get_user_model
from django.urls import reverse
from ..models import Invite, Membership, Project
membership_roles = ('owner', 'manager', 'author', 'guest')
@pytest.fixture()
def use_project_invite_timeout(settings):
    # Force every invite to be expired immediately so the timeout error
    # path of the project_join view can be exercised.
    settings.PROJECT_INVITE_TIMEOUT = 0
@pytest.mark.parametrize('membership_role', membership_roles)
def test_project_join(db, client, membership_role):
    """Visiting a personal invite link creates the membership and redirects."""
    target_project = Project.objects.get(id=1)
    invited_user = get_user_model().objects.get(username='user')
    client.login(username='user', password='user')

    pending_invite = Invite(project=target_project, user=invited_user, role=membership_role)
    pending_invite.make_token()
    pending_invite.save()

    resp = client.get(reverse('project_join', args=[pending_invite.token]))

    assert resp.status_code == 302
    assert Membership.objects.get(project=target_project, user=invited_user, role=membership_role)
@pytest.mark.parametrize('membership_role', membership_roles)
def test_project_join_mail(db, client, membership_role):
    """An e-mail invite (no user bound) joins the currently logged-in user."""
    client.login(username='user', password='user')
    target_project = Project.objects.get(id=1)
    current_user = get_user_model().objects.get(username='user')

    mail_invite = Invite(project=target_project, user=None, role=membership_role)
    mail_invite.make_token()
    mail_invite.save()

    resp = client.get(reverse('project_join', args=[mail_invite.token]))

    assert resp.status_code == 302
    assert Membership.objects.get(project=target_project, user=current_user, role=membership_role)
@pytest.mark.parametrize('membership_role', membership_roles)
def test_project_join_mail_existing_user(db, client, membership_role):
    """A mail invite must not change the role of an already-existing member."""
    client.login(username='author', password='author')
    target_project = Project.objects.get(id=1)
    existing_member = get_user_model().objects.get(username='author')

    mail_invite = Invite(project=target_project, user=None, role=membership_role)
    mail_invite.make_token()
    mail_invite.save()

    resp = client.get(reverse('project_join', args=[mail_invite.token]))

    existing_membership = Membership.objects.get(project=target_project, user=existing_member)
    assert resp.status_code == 302
    assert existing_membership.role == 'author'
@pytest.mark.parametrize('membership_role', membership_roles)
def test_project_join_error(db, client, membership_role):
    """An invalid token renders the error page and creates no membership."""
    client.login(username='user', password='user')
    target_project = Project.objects.get(id=1)
    invited_user = get_user_model().objects.get(username='user')

    pending_invite = Invite(project=target_project, user=invited_user, role=membership_role)
    pending_invite.make_token()
    pending_invite.save()

    # Deliberately request with a bogus token instead of pending_invite.token.
    resp = client.get(reverse('project_join', args=['wrong']))

    assert resp.status_code == 200
    assert b'is not valid' in resp.content
    assert not Membership.objects.filter(project=target_project, user=invited_user, role=membership_role).exists()
@pytest.mark.parametrize('membership_role', membership_roles)
def test_project_join_timeout_error(db, client, membership_role, use_project_invite_timeout):
    """With PROJECT_INVITE_TIMEOUT = 0 every invite is already expired."""
    client.login(username='user', password='user')
    target_project = Project.objects.get(id=1)
    invited_user = get_user_model().objects.get(username='user')

    pending_invite = Invite(project=target_project, user=invited_user, role=membership_role)
    pending_invite.make_token()
    pending_invite.save()

    resp = client.get(reverse('project_join', args=[pending_invite.token]))

    assert resp.status_code == 200
    assert b'expired' in resp.content
    assert not Membership.objects.filter(project=target_project, user=invited_user, role=membership_role).exists()
@pytest.mark.parametrize('membership_role', membership_roles)
def test_project_join_user_error(db, client, membership_role):
    """An invite bound to a different user cannot be redeemed by this one."""
    client.login(username='user', password='user')
    target_project = Project.objects.get(id=1)
    logged_in_user = get_user_model().objects.get(username='user')

    # The invite is addressed to 'guest', not to the logged-in 'user'.
    other_invite = Invite(project=target_project, user=get_user_model().objects.get(username='guest'), role=membership_role)
    other_invite.make_token()
    other_invite.save()

    resp = client.get(reverse('project_join', args=[other_invite.token]))

    assert resp.status_code == 200
    assert b'guest' in resp.content
    assert not Membership.objects.filter(project=target_project, user=logged_in_user, role=membership_role).exists()
| 34.666667
| 111
| 0.736058
| 541
| 4,160
| 5.487985
| 0.118299
| 0.113169
| 0.072752
| 0.066689
| 0.880431
| 0.871337
| 0.871337
| 0.818794
| 0.818794
| 0.818794
| 0
| 0.006946
| 0.134856
| 4,160
| 119
| 112
| 34.957983
| 0.818005
| 0
| 0
| 0.72619
| 0
| 0
| 0.072837
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 1
| 0.083333
| false
| 0.071429
| 0.047619
| 0
| 0.130952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
bff77e2d947828f8821ccfe39ceda6bf4aecb03d
| 529
|
py
|
Python
|
notebook/numpy_tile.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/numpy_tile.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/numpy_tile.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
# Demonstration of numpy.tile: repeating an array along one or more axes.
# The comment below each print() shows its expected output.
import numpy as np
a = np.array([0, 1, 2, 3])
# Scalar reps: repeat the 1-D array twice along its only axis.
print(np.tile(a, 2))
# [0 1 2 3 0 1 2 3]
# Tuple reps (3, 2): 3 copies vertically, 2 horizontally -> 2-D result.
print(np.tile(a, (3, 2)))
# [[0 1 2 3 0 1 2 3]
# [0 1 2 3 0 1 2 3]
# [0 1 2 3 0 1 2 3]]
# (2, 1): stack two copies vertically, no horizontal repetition.
print(np.tile(a, (2, 1)))
# [[0 1 2 3]
# [0 1 2 3]]
# The same rules apply to a 2-D input array.
a = np.array([[11, 12], [21, 22]])
# Scalar reps on a 2-D array repeats along the last axis (columns).
print(np.tile(a, 2))
# [[11 12 11 12]
# [21 22 21 22]]
print(np.tile(a, (3, 2)))
# [[11 12 11 12]
# [21 22 21 22]
# [11 12 11 12]
# [21 22 21 22]
# [11 12 11 12]
# [21 22 21 22]]
print(np.tile(a, (2, 1)))
# [[11 12]
# [21 22]
# [11 12]
# [21 22]]
| 14.694444
| 34
| 0.463138
| 132
| 529
| 1.856061
| 0.121212
| 0.089796
| 0.134694
| 0.179592
| 0.791837
| 0.791837
| 0.77551
| 0.706122
| 0.673469
| 0.587755
| 0
| 0.37467
| 0.283554
| 529
| 35
| 35
| 15.114286
| 0.271768
| 0.485822
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.666667
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
8757d886c4dd91956ae44a43a5275e9819ae25c3
| 78,539
|
py
|
Python
|
sdk/python/pulumi_google_native/storage/v1/outputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/storage/v1/outputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/storage/v1/outputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'BucketAccessControlProjectTeamResponse',
'BucketAccessControlResponse',
'BucketAutoclassResponse',
'BucketBillingResponse',
'BucketCorsItemResponse',
'BucketCustomPlacementConfigResponse',
'BucketEncryptionResponse',
'BucketIamConfigurationBucketPolicyOnlyResponse',
'BucketIamConfigurationResponse',
'BucketIamConfigurationUniformBucketLevelAccessResponse',
'BucketIamPolicyBindingsItemResponse',
'BucketLifecycleResponse',
'BucketLifecycleRuleItemActionResponse',
'BucketLifecycleRuleItemConditionResponse',
'BucketLifecycleRuleItemResponse',
'BucketLoggingResponse',
'BucketObjectCustomerEncryptionResponse',
'BucketObjectOwnerResponse',
'BucketOwnerResponse',
'BucketRetentionPolicyResponse',
'BucketVersioningResponse',
'BucketWebsiteResponse',
'DefaultObjectAccessControlProjectTeamResponse',
'ExprResponse',
'ObjectAccessControlProjectTeamResponse',
'ObjectAccessControlResponse',
'ObjectIamPolicyBindingsItemResponse',
]
@pulumi.output_type
class BucketAccessControlProjectTeamResponse(dict):
"""
The project team associated with the entity, if any.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "projectNumber":
suggest = "project_number"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BucketAccessControlProjectTeamResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BucketAccessControlProjectTeamResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BucketAccessControlProjectTeamResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
project_number: str,
team: str):
"""
The project team associated with the entity, if any.
:param str project_number: The project number.
:param str team: The team.
"""
pulumi.set(__self__, "project_number", project_number)
pulumi.set(__self__, "team", team)
@property
@pulumi.getter(name="projectNumber")
def project_number(self) -> str:
"""
The project number.
"""
return pulumi.get(self, "project_number")
@property
@pulumi.getter
def team(self) -> str:
"""
The team.
"""
return pulumi.get(self, "team")
@pulumi.output_type
class BucketAccessControlResponse(dict):
"""
An access-control entry.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "entityId":
suggest = "entity_id"
elif key == "projectTeam":
suggest = "project_team"
elif key == "selfLink":
suggest = "self_link"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BucketAccessControlResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BucketAccessControlResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BucketAccessControlResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket: str,
domain: str,
email: str,
entity: str,
entity_id: str,
etag: str,
kind: str,
project_team: 'outputs.BucketAccessControlProjectTeamResponse',
role: str,
self_link: str):
"""
An access-control entry.
:param str bucket: The name of the bucket.
:param str domain: The domain associated with the entity, if any.
:param str email: The email address associated with the entity, if any.
:param str entity: The entity holding the permission, in one of the following forms:
- user-userId
- user-email
- group-groupId
- group-email
- domain-domain
- project-team-projectId
- allUsers
- allAuthenticatedUsers Examples:
- The user liz@example.com would be user-liz@example.com.
- The group example@googlegroups.com would be group-example@googlegroups.com.
- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.
:param str entity_id: The ID for the entity, if any.
:param str etag: HTTP 1.1 Entity tag for the access-control entry.
:param str kind: The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.
:param 'BucketAccessControlProjectTeamResponse' project_team: The project team associated with the entity, if any.
:param str role: The access permission for the entity.
:param str self_link: The link to this access-control entry.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "domain", domain)
pulumi.set(__self__, "email", email)
pulumi.set(__self__, "entity", entity)
pulumi.set(__self__, "entity_id", entity_id)
pulumi.set(__self__, "etag", etag)
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "project_team", project_team)
pulumi.set(__self__, "role", role)
pulumi.set(__self__, "self_link", self_link)
@property
@pulumi.getter
def bucket(self) -> str:
"""
The name of the bucket.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def domain(self) -> str:
"""
The domain associated with the entity, if any.
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def email(self) -> str:
"""
The email address associated with the entity, if any.
"""
return pulumi.get(self, "email")
@property
@pulumi.getter
def entity(self) -> str:
"""
The entity holding the permission, in one of the following forms:
- user-userId
- user-email
- group-groupId
- group-email
- domain-domain
- project-team-projectId
- allUsers
- allAuthenticatedUsers Examples:
- The user liz@example.com would be user-liz@example.com.
- The group example@googlegroups.com would be group-example@googlegroups.com.
- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.
"""
return pulumi.get(self, "entity")
@property
@pulumi.getter(name="entityId")
def entity_id(self) -> str:
"""
The ID for the entity, if any.
"""
return pulumi.get(self, "entity_id")
@property
@pulumi.getter
def etag(self) -> str:
"""
HTTP 1.1 Entity tag for the access-control entry.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def kind(self) -> str:
"""
The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="projectTeam")
def project_team(self) -> 'outputs.BucketAccessControlProjectTeamResponse':
"""
The project team associated with the entity, if any.
"""
return pulumi.get(self, "project_team")
@property
@pulumi.getter
def role(self) -> str:
"""
The access permission for the entity.
"""
return pulumi.get(self, "role")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
"""
The link to this access-control entry.
"""
return pulumi.get(self, "self_link")
@pulumi.output_type
class BucketAutoclassResponse(dict):
"""
The bucket's Autoclass configuration.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "toggleTime":
suggest = "toggle_time"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BucketAutoclassResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BucketAutoclassResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BucketAutoclassResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: bool,
toggle_time: str):
"""
The bucket's Autoclass configuration.
:param bool enabled: Whether or not Autoclass is enabled on this bucket
:param str toggle_time: A date and time in RFC 3339 format representing the instant at which "enabled" was last toggled.
"""
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "toggle_time", toggle_time)
@property
@pulumi.getter
def enabled(self) -> bool:
"""
Whether or not Autoclass is enabled on this bucket
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="toggleTime")
def toggle_time(self) -> str:
"""
A date and time in RFC 3339 format representing the instant at which "enabled" was last toggled.
"""
return pulumi.get(self, "toggle_time")
@pulumi.output_type
class BucketBillingResponse(dict):
"""
The bucket's billing configuration.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "requesterPays":
suggest = "requester_pays"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BucketBillingResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
BucketBillingResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
BucketBillingResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
requester_pays: bool):
"""
The bucket's billing configuration.
:param bool requester_pays: When set to true, Requester Pays is enabled for this bucket.
"""
pulumi.set(__self__, "requester_pays", requester_pays)
@property
@pulumi.getter(name="requesterPays")
def requester_pays(self) -> bool:
"""
When set to true, Requester Pays is enabled for this bucket.
"""
return pulumi.get(self, "requester_pays")
@pulumi.output_type
class BucketCorsItemResponse(dict):
    """
    A single Cross-Origin Resource Sharing (CORS) configuration entry for a bucket.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "maxAgeSeconds": "max_age_seconds",
            "responseHeader": "response_header",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketCorsItemResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketCorsItemResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketCorsItemResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 max_age_seconds: int,
                 method: Sequence[str],
                 origin: Sequence[str],
                 response_header: Sequence[str]):
        """
        :param int max_age_seconds: The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.
        :param Sequence[str] method: The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method".
        :param Sequence[str] origin: The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin".
        :param Sequence[str] response_header: The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.
        """
        for prop, value in (
            ("max_age_seconds", max_age_seconds),
            ("method", method),
            ("origin", origin),
            ("response_header", response_header),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="maxAgeSeconds")
    def max_age_seconds(self) -> int:
        """
        The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.
        """
        return pulumi.get(self, "max_age_seconds")

    @property
    @pulumi.getter
    def method(self) -> Sequence[str]:
        """
        The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method".
        """
        return pulumi.get(self, "method")

    @property
    @pulumi.getter
    def origin(self) -> Sequence[str]:
        """
        The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin".
        """
        return pulumi.get(self, "origin")

    @property
    @pulumi.getter(name="responseHeader")
    def response_header(self) -> Sequence[str]:
        """
        The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.
        """
        return pulumi.get(self, "response_header")
@pulumi.output_type
class BucketCustomPlacementConfigResponse(dict):
    """
    The bucket's custom placement configuration for Custom Dual Regions.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "dataLocations": "data_locations",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketCustomPlacementConfigResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketCustomPlacementConfigResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketCustomPlacementConfigResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 data_locations: Sequence[str]):
        """
        The bucket's custom placement configuration for Custom Dual Regions.
        :param Sequence[str] data_locations: The list of regional locations in which data is placed.
        """
        pulumi.set(__self__, "data_locations", data_locations)

    @property
    @pulumi.getter(name="dataLocations")
    def data_locations(self) -> Sequence[str]:
        """
        The list of regional locations in which data is placed.
        """
        return pulumi.get(self, "data_locations")
@pulumi.output_type
class BucketEncryptionResponse(dict):
    """
    Encryption configuration for a bucket.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "defaultKmsKeyName": "default_kms_key_name",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketEncryptionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketEncryptionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketEncryptionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 default_kms_key_name: str):
        """
        Encryption configuration for a bucket.
        :param str default_kms_key_name: A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.
        """
        pulumi.set(__self__, "default_kms_key_name", default_kms_key_name)

    @property
    @pulumi.getter(name="defaultKmsKeyName")
    def default_kms_key_name(self) -> str:
        """
        A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.
        """
        return pulumi.get(self, "default_kms_key_name")
@pulumi.output_type
class BucketIamConfigurationBucketPolicyOnlyResponse(dict):
    """
    The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "lockedTime": "locked_time",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketIamConfigurationBucketPolicyOnlyResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketIamConfigurationBucketPolicyOnlyResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketIamConfigurationBucketPolicyOnlyResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 enabled: bool,
                 locked_time: str):
        """
        The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
        :param bool enabled: If set, access is controlled only by bucket-level or above IAM policies.
        :param str locked_time: The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        for prop, value in (("enabled", enabled), ("locked_time", locked_time)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        If set, access is controlled only by bucket-level or above IAM policies.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="lockedTime")
    def locked_time(self) -> str:
        """
        The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        return pulumi.get(self, "locked_time")
@pulumi.output_type
class BucketIamConfigurationResponse(dict):
    """
    The bucket's IAM configuration.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "bucketPolicyOnly": "bucket_policy_only",
            "publicAccessPrevention": "public_access_prevention",
            "uniformBucketLevelAccess": "uniform_bucket_level_access",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketIamConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketIamConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketIamConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 bucket_policy_only: 'outputs.BucketIamConfigurationBucketPolicyOnlyResponse',
                 public_access_prevention: str,
                 uniform_bucket_level_access: 'outputs.BucketIamConfigurationUniformBucketLevelAccessResponse'):
        """
        The bucket's IAM configuration.
        :param 'BucketIamConfigurationBucketPolicyOnlyResponse' bucket_policy_only: The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
        :param str public_access_prevention: The bucket's Public Access Prevention configuration. Currently, 'inherited' and 'enforced' are supported.
        :param 'BucketIamConfigurationUniformBucketLevelAccessResponse' uniform_bucket_level_access: The bucket's uniform bucket-level access configuration.
        """
        for prop, value in (
            ("bucket_policy_only", bucket_policy_only),
            ("public_access_prevention", public_access_prevention),
            ("uniform_bucket_level_access", uniform_bucket_level_access),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="bucketPolicyOnly")
    def bucket_policy_only(self) -> 'outputs.BucketIamConfigurationBucketPolicyOnlyResponse':
        """
        The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
        """
        return pulumi.get(self, "bucket_policy_only")

    @property
    @pulumi.getter(name="publicAccessPrevention")
    def public_access_prevention(self) -> str:
        """
        The bucket's Public Access Prevention configuration. Currently, 'inherited' and 'enforced' are supported.
        """
        return pulumi.get(self, "public_access_prevention")

    @property
    @pulumi.getter(name="uniformBucketLevelAccess")
    def uniform_bucket_level_access(self) -> 'outputs.BucketIamConfigurationUniformBucketLevelAccessResponse':
        """
        The bucket's uniform bucket-level access configuration.
        """
        return pulumi.get(self, "uniform_bucket_level_access")
@pulumi.output_type
class BucketIamConfigurationUniformBucketLevelAccessResponse(dict):
    """
    The bucket's uniform bucket-level access configuration.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "lockedTime": "locked_time",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketIamConfigurationUniformBucketLevelAccessResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketIamConfigurationUniformBucketLevelAccessResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketIamConfigurationUniformBucketLevelAccessResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 enabled: bool,
                 locked_time: str):
        """
        The bucket's uniform bucket-level access configuration.
        :param bool enabled: If set, access is controlled only by bucket-level or above IAM policies.
        :param str locked_time: The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        for prop, value in (("enabled", enabled), ("locked_time", locked_time)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        If set, access is controlled only by bucket-level or above IAM policies.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="lockedTime")
    def locked_time(self) -> str:
        """
        The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        return pulumi.get(self, "locked_time")
@pulumi.output_type
class BucketIamPolicyBindingsItemResponse(dict):
    """
    A single IAM policy binding: a role, the members granted that role, and an associated condition.
    """
    def __init__(__self__, *,
                 condition: 'outputs.ExprResponse',
                 members: Sequence[str],
                 role: str):
        """
        :param 'ExprResponse' condition: The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently.
        :param Sequence[str] members: A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows:
        - allUsers — A special identifier that represents anyone on the internet; with or without a Google account.
        - allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account.
        - user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com.
        - serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com .
        - group:emailid — An email address that represents a Google group. For example, group:admins@example.com.
        - domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com.
        - projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project
        - projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project
        - projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project
        :param str role: The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.
        The new IAM roles are:
        - roles/storage.admin — Full control of Google Cloud Storage resources.
        - roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects.
        - roles/storage.objectCreator — Access to create objects in Google Cloud Storage.
        - roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are:
        - roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role.
        - roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role.
        - roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role.
        - roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role.
        - roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.
        """
        for prop, value in (
            ("condition", condition),
            ("members", members),
            ("role", role),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def condition(self) -> 'outputs.ExprResponse':
        """
        The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently.
        """
        return pulumi.get(self, "condition")

    @property
    @pulumi.getter
    def members(self) -> Sequence[str]:
        """
        A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows:
        - allUsers — A special identifier that represents anyone on the internet; with or without a Google account.
        - allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account.
        - user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com.
        - serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com .
        - group:emailid — An email address that represents a Google group. For example, group:admins@example.com.
        - domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com.
        - projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project
        - projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project
        - projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project
        """
        return pulumi.get(self, "members")

    @property
    @pulumi.getter
    def role(self) -> str:
        """
        The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.
        The new IAM roles are:
        - roles/storage.admin — Full control of Google Cloud Storage resources.
        - roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects.
        - roles/storage.objectCreator — Access to create objects in Google Cloud Storage.
        - roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are:
        - roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role.
        - roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role.
        - roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role.
        - roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role.
        - roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.
        """
        return pulumi.get(self, "role")
@pulumi.output_type
class BucketLifecycleResponse(dict):
    """
    The bucket's lifecycle configuration. See lifecycle management for more information.
    """
    def __init__(__self__, *,
                 rule: Sequence['outputs.BucketLifecycleRuleItemResponse']):
        """
        The bucket's lifecycle configuration. See lifecycle management for more information.
        :param Sequence['BucketLifecycleRuleItemResponse'] rule: A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.
        """
        # Single wire field: the list of lifecycle rules.
        pulumi.set(__self__, "rule", rule)

    @property
    @pulumi.getter
    def rule(self) -> Sequence['outputs.BucketLifecycleRuleItemResponse']:
        """
        A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.
        """
        return pulumi.get(self, "rule")
@pulumi.output_type
class BucketLifecycleRuleItemActionResponse(dict):
    """
    The action to take.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "storageClass": "storage_class",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketLifecycleRuleItemActionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketLifecycleRuleItemActionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketLifecycleRuleItemActionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 storage_class: str,
                 type: str):
        """
        The action to take.
        :param str storage_class: Target storage class. Required iff the type of the action is SetStorageClass.
        :param str type: Type of the action. Currently, only Delete and SetStorageClass are supported.
        """
        for prop, value in (("storage_class", storage_class), ("type", type)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="storageClass")
    def storage_class(self) -> str:
        """
        Target storage class. Required iff the type of the action is SetStorageClass.
        """
        return pulumi.get(self, "storage_class")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of the action. Currently, only Delete and SetStorageClass are supported.
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class BucketLifecycleRuleItemConditionResponse(dict):
    """
    The condition(s) under which the action will be taken.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "createdBefore": "created_before",
            "customTimeBefore": "custom_time_before",
            "daysSinceCustomTime": "days_since_custom_time",
            "daysSinceNoncurrentTime": "days_since_noncurrent_time",
            "isLive": "is_live",
            "matchesPattern": "matches_pattern",
            "matchesStorageClass": "matches_storage_class",
            "noncurrentTimeBefore": "noncurrent_time_before",
            "numNewerVersions": "num_newer_versions",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketLifecycleRuleItemConditionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketLifecycleRuleItemConditionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketLifecycleRuleItemConditionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 age: int,
                 created_before: str,
                 custom_time_before: str,
                 days_since_custom_time: int,
                 days_since_noncurrent_time: int,
                 is_live: bool,
                 matches_pattern: str,
                 matches_storage_class: Sequence[str],
                 noncurrent_time_before: str,
                 num_newer_versions: int):
        """
        The condition(s) under which the action will be taken.
        :param int age: Age of an object (in days). This condition is satisfied when an object reaches the specified age.
        :param str created_before: A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). This condition is satisfied when an object is created before midnight of the specified date in UTC.
        :param str custom_time_before: A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). This condition is satisfied when the custom time on an object is before this date in UTC.
        :param int days_since_custom_time: Number of days elapsed since the user-specified timestamp set on an object. The condition is satisfied if the days elapsed is at least this number. If no custom timestamp is specified on an object, the condition does not apply.
        :param int days_since_noncurrent_time: Number of days elapsed since the noncurrent timestamp of an object. The condition is satisfied if the days elapsed is at least this number. This condition is relevant only for versioned objects. The value of the field must be a nonnegative integer. If it's zero, the object version will become eligible for Lifecycle action as soon as it becomes noncurrent.
        :param bool is_live: Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.
        :param str matches_pattern: A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. Note: This feature is currently in the "Early Access" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.
        :param Sequence[str] matches_storage_class: Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.
        :param str noncurrent_time_before: A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). This condition is satisfied when the noncurrent time on an object is before this date in UTC. This condition is relevant only for versioned objects.
        :param int num_newer_versions: Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.
        """
        for prop, value in (
            ("age", age),
            ("created_before", created_before),
            ("custom_time_before", custom_time_before),
            ("days_since_custom_time", days_since_custom_time),
            ("days_since_noncurrent_time", days_since_noncurrent_time),
            ("is_live", is_live),
            ("matches_pattern", matches_pattern),
            ("matches_storage_class", matches_storage_class),
            ("noncurrent_time_before", noncurrent_time_before),
            ("num_newer_versions", num_newer_versions),
        ):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def age(self) -> int:
        """
        Age of an object (in days). This condition is satisfied when an object reaches the specified age.
        """
        return pulumi.get(self, "age")

    @property
    @pulumi.getter(name="createdBefore")
    def created_before(self) -> str:
        """
        A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). This condition is satisfied when an object is created before midnight of the specified date in UTC.
        """
        return pulumi.get(self, "created_before")

    @property
    @pulumi.getter(name="customTimeBefore")
    def custom_time_before(self) -> str:
        """
        A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). This condition is satisfied when the custom time on an object is before this date in UTC.
        """
        return pulumi.get(self, "custom_time_before")

    @property
    @pulumi.getter(name="daysSinceCustomTime")
    def days_since_custom_time(self) -> int:
        """
        Number of days elapsed since the user-specified timestamp set on an object. The condition is satisfied if the days elapsed is at least this number. If no custom timestamp is specified on an object, the condition does not apply.
        """
        return pulumi.get(self, "days_since_custom_time")

    @property
    @pulumi.getter(name="daysSinceNoncurrentTime")
    def days_since_noncurrent_time(self) -> int:
        """
        Number of days elapsed since the noncurrent timestamp of an object. The condition is satisfied if the days elapsed is at least this number. This condition is relevant only for versioned objects. The value of the field must be a nonnegative integer. If it's zero, the object version will become eligible for Lifecycle action as soon as it becomes noncurrent.
        """
        return pulumi.get(self, "days_since_noncurrent_time")

    @property
    @pulumi.getter(name="isLive")
    def is_live(self) -> bool:
        """
        Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.
        """
        return pulumi.get(self, "is_live")

    @property
    @pulumi.getter(name="matchesPattern")
    def matches_pattern(self) -> str:
        """
        A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. Note: This feature is currently in the "Early Access" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.
        """
        return pulumi.get(self, "matches_pattern")

    @property
    @pulumi.getter(name="matchesStorageClass")
    def matches_storage_class(self) -> Sequence[str]:
        """
        Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.
        """
        return pulumi.get(self, "matches_storage_class")

    @property
    @pulumi.getter(name="noncurrentTimeBefore")
    def noncurrent_time_before(self) -> str:
        """
        A date in RFC 3339 format with only the date part (for instance, "2013-01-15"). This condition is satisfied when the noncurrent time on an object is before this date in UTC. This condition is relevant only for versioned objects.
        """
        return pulumi.get(self, "noncurrent_time_before")

    @property
    @pulumi.getter(name="numNewerVersions")
    def num_newer_versions(self) -> int:
        """
        Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.
        """
        return pulumi.get(self, "num_newer_versions")
@pulumi.output_type
class BucketLifecycleRuleItemResponse(dict):
    """
    A single lifecycle management rule: an action plus the condition(s) that trigger it.
    """
    def __init__(__self__, *,
                 action: 'outputs.BucketLifecycleRuleItemActionResponse',
                 condition: 'outputs.BucketLifecycleRuleItemConditionResponse'):
        """
        :param 'BucketLifecycleRuleItemActionResponse' action: The action to take.
        :param 'BucketLifecycleRuleItemConditionResponse' condition: The condition(s) under which the action will be taken.
        """
        for prop, value in (("action", action), ("condition", condition)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def action(self) -> 'outputs.BucketLifecycleRuleItemActionResponse':
        """
        The action to take.
        """
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def condition(self) -> 'outputs.BucketLifecycleRuleItemConditionResponse':
        """
        The condition(s) under which the action will be taken.
        """
        return pulumi.get(self, "condition")
@pulumi.output_type
class BucketLoggingResponse(dict):
    """
    The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "logBucket": "log_bucket",
            "logObjectPrefix": "log_object_prefix",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketLoggingResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketLoggingResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketLoggingResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 log_bucket: str,
                 log_object_prefix: str):
        """
        The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.
        :param str log_bucket: The destination bucket where the current bucket's logs should be placed.
        :param str log_object_prefix: A prefix for log object names.
        """
        for prop, value in (("log_bucket", log_bucket), ("log_object_prefix", log_object_prefix)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="logBucket")
    def log_bucket(self) -> str:
        """
        The destination bucket where the current bucket's logs should be placed.
        """
        return pulumi.get(self, "log_bucket")

    @property
    @pulumi.getter(name="logObjectPrefix")
    def log_object_prefix(self) -> str:
        """
        A prefix for log object names.
        """
        return pulumi.get(self, "log_object_prefix")
@pulumi.output_type
class BucketObjectCustomerEncryptionResponse(dict):
    """
    Metadata of customer-supplied encryption key, if the object is encrypted by such a key.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case getter name, if known.
        suggest = {
            "encryptionAlgorithm": "encryption_algorithm",
            "keySha256": "key_sha256",
        }.get(key)
        if suggest is not None:
            pulumi.log.warn(f"Key '{key}' not found in BucketObjectCustomerEncryptionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketObjectCustomerEncryptionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default: Any = None) -> Any:
        BucketObjectCustomerEncryptionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 encryption_algorithm: str,
                 key_sha256: str):
        """
        Metadata of customer-supplied encryption key, if the object is encrypted by such a key.
        :param str encryption_algorithm: The encryption algorithm.
        :param str key_sha256: SHA256 hash value of the encryption key.
        """
        for prop, value in (("encryption_algorithm", encryption_algorithm), ("key_sha256", key_sha256)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="encryptionAlgorithm")
    def encryption_algorithm(self) -> str:
        """
        The encryption algorithm.
        """
        return pulumi.get(self, "encryption_algorithm")

    @property
    @pulumi.getter(name="keySha256")
    def key_sha256(self) -> str:
        """
        SHA256 hash value of the encryption key.
        """
        return pulumi.get(self, "key_sha256")
@pulumi.output_type
class BucketObjectOwnerResponse(dict):
    """
    The owner of the object; always the uploader of the object.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property counterparts.
        suggest = {"entityId": "entity_id"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketObjectOwnerResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated dict-style access before delegating to dict.
        BucketObjectOwnerResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        BucketObjectOwnerResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 entity: str,
                 entity_id: str):
        """
        :param str entity: The entity, in the form user-userId.
        :param str entity_id: The ID for the entity.
        """
        pulumi.set(__self__, "entity", entity)
        pulumi.set(__self__, "entity_id", entity_id)

    @property
    @pulumi.getter
    def entity(self) -> str:
        """The entity, in the form user-userId."""
        return pulumi.get(self, "entity")

    @property
    @pulumi.getter(name="entityId")
    def entity_id(self) -> str:
        """The ID for the entity."""
        return pulumi.get(self, "entity_id")
@pulumi.output_type
class BucketOwnerResponse(dict):
    """
    The owner of the bucket; always the project team's owner group.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property counterparts.
        suggest = {"entityId": "entity_id"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketOwnerResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated dict-style access before delegating to dict.
        BucketOwnerResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        BucketOwnerResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 entity: str,
                 entity_id: str):
        """
        :param str entity: The entity, in the form project-owner-projectId.
        :param str entity_id: The ID for the entity.
        """
        pulumi.set(__self__, "entity", entity)
        pulumi.set(__self__, "entity_id", entity_id)

    @property
    @pulumi.getter
    def entity(self) -> str:
        """The entity, in the form project-owner-projectId."""
        return pulumi.get(self, "entity")

    @property
    @pulumi.getter(name="entityId")
    def entity_id(self) -> str:
        """The ID for the entity."""
        return pulumi.get(self, "entity_id")
@pulumi.output_type
class BucketRetentionPolicyResponse(dict):
    """
    The bucket's retention policy: a minimum retention time for every object,
    based on creation time. Overwriting or deleting younger objects fails with
    PERMISSION_DENIED. Unlocked policies can be modified or removed via
    storage.buckets.update; a locked policy cannot be removed or shortened for
    the bucket's lifetime.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property counterparts.
        suggest = {
            "effectiveTime": "effective_time",
            "isLocked": "is_locked",
            "retentionPeriod": "retention_period",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketRetentionPolicyResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated dict-style access before delegating to dict.
        BucketRetentionPolicyResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        BucketRetentionPolicyResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 effective_time: str,
                 is_locked: bool,
                 retention_period: str):
        """
        :param str effective_time: Server-determined RFC 3339 timestamp from which
               the policy was enforced and effective.
        :param bool is_locked: Once locked, the retention policy cannot be modified.
        :param str retention_period: Retention duration in seconds; must be greater
               than zero and less than 100 years.
        """
        pulumi.set(__self__, "effective_time", effective_time)
        pulumi.set(__self__, "is_locked", is_locked)
        pulumi.set(__self__, "retention_period", retention_period)

    @property
    @pulumi.getter(name="effectiveTime")
    def effective_time(self) -> str:
        """Server-determined RFC 3339 time from which the policy was enforced."""
        return pulumi.get(self, "effective_time")

    @property
    @pulumi.getter(name="isLocked")
    def is_locked(self) -> bool:
        """Once locked, the retention policy cannot be modified."""
        return pulumi.get(self, "is_locked")

    @property
    @pulumi.getter(name="retentionPeriod")
    def retention_period(self) -> str:
        """Retention duration in seconds (greater than zero, under 100 years)."""
        return pulumi.get(self, "retention_period")
@pulumi.output_type
class BucketVersioningResponse(dict):
    """
    The bucket's versioning configuration.
    """
    def __init__(__self__, *,
                 enabled: bool):
        """
        :param bool enabled: While true, versioning is fully enabled for this bucket.
        """
        pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """While set to true, versioning is fully enabled for this bucket."""
        return pulumi.get(self, "enabled")
@pulumi.output_type
class BucketWebsiteResponse(dict):
    """
    The bucket's website configuration, controlling how the service behaves
    when bucket contents are accessed as a web site.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property counterparts.
        suggest = {
            "mainPageSuffix": "main_page_suffix",
            "notFoundPage": "not_found_page",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketWebsiteResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated dict-style access before delegating to dict.
        BucketWebsiteResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        BucketWebsiteResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 main_page_suffix: str,
                 not_found_page: str):
        """
        :param str main_page_suffix: Suffix appended (after a trailing '/') to a
               missing object path before retrying the lookup; enables index.html
               objects to represent directory pages.
        :param str not_found_page: Object returned as the 404 Not Found content
               when the requested path and any mainPageSuffix object are missing.
        """
        pulumi.set(__self__, "main_page_suffix", main_page_suffix)
        pulumi.set(__self__, "not_found_page", not_found_page)

    @property
    @pulumi.getter(name="mainPageSuffix")
    def main_page_suffix(self) -> str:
        """Suffix tried (after a trailing '/') when the requested path is missing."""
        return pulumi.get(self, "main_page_suffix")

    @property
    @pulumi.getter(name="notFoundPage")
    def not_found_page(self) -> str:
        """Object served as the 404 Not Found content when lookups fail."""
        return pulumi.get(self, "not_found_page")
@pulumi.output_type
class DefaultObjectAccessControlProjectTeamResponse(dict):
    """
    The project team associated with the entity, if any.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property counterparts.
        suggest = {"projectNumber": "project_number"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DefaultObjectAccessControlProjectTeamResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated dict-style access before delegating to dict.
        DefaultObjectAccessControlProjectTeamResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DefaultObjectAccessControlProjectTeamResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 project_number: str,
                 team: str):
        """
        :param str project_number: The project number.
        :param str team: The team.
        """
        pulumi.set(__self__, "project_number", project_number)
        pulumi.set(__self__, "team", team)

    @property
    @pulumi.getter(name="projectNumber")
    def project_number(self) -> str:
        """The project number."""
        return pulumi.get(self, "project_number")

    @property
    @pulumi.getter
    def team(self) -> str:
        """The team."""
        return pulumi.get(self, "team")
@pulumi.output_type
class ExprResponse(dict):
    """
    Represents an expression text, e.g. title "User account presence",
    description "Determines whether the request has a user account",
    expression "size(request.user) > 0".
    """
    def __init__(__self__, *,
                 description: str,
                 expression: str,
                 location: str,
                 title: str):
        """
        :param str description: Optional longer description of the expression,
               e.g. shown when hovered over in a UI.
        :param str expression: Textual representation of an expression in Common
               Expression Language syntax; the containing message's application
               context determines the supported CEL feature set.
        :param str location: Optional location string for error reporting, e.g. a
               file name and a position in the file.
        :param str title: Optional short title describing the expression's purpose,
               e.g. for UIs that allow entering the expression.
        """
        # All four fields are plain strings; store them uniformly.
        for attr, value in (
            ("description", description),
            ("expression", expression),
            ("location", location),
            ("title", title),
        ):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def description(self) -> str:
        """Optional longer description of the expression."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def expression(self) -> str:
        """The expression in Common Expression Language syntax."""
        return pulumi.get(self, "expression")

    @property
    @pulumi.getter
    def location(self) -> str:
        """Optional location of the expression, for error reporting."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def title(self) -> str:
        """Optional short title describing the expression's purpose."""
        return pulumi.get(self, "title")
@pulumi.output_type
class ObjectAccessControlProjectTeamResponse(dict):
    """
    The project team associated with the entity, if any.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property counterparts.
        suggest = {"projectNumber": "project_number"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ObjectAccessControlProjectTeamResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated dict-style access before delegating to dict.
        ObjectAccessControlProjectTeamResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ObjectAccessControlProjectTeamResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 project_number: str,
                 team: str):
        """
        :param str project_number: The project number.
        :param str team: The team.
        """
        pulumi.set(__self__, "project_number", project_number)
        pulumi.set(__self__, "team", team)

    @property
    @pulumi.getter(name="projectNumber")
    def project_number(self) -> str:
        """The project number."""
        return pulumi.get(self, "project_number")

    @property
    @pulumi.getter
    def team(self) -> str:
        """The team."""
        return pulumi.get(self, "team")
@pulumi.output_type
class ObjectAccessControlResponse(dict):
    """
    An access-control entry.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property counterparts.
        suggest = {
            "entityId": "entity_id",
            "projectTeam": "project_team",
            "selfLink": "self_link",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ObjectAccessControlResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated dict-style access before delegating to dict.
        ObjectAccessControlResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ObjectAccessControlResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 bucket: str,
                 domain: str,
                 email: str,
                 entity: str,
                 entity_id: str,
                 etag: str,
                 generation: str,
                 kind: str,
                 object: str,
                 project_team: 'outputs.ObjectAccessControlProjectTeamResponse',
                 role: str,
                 self_link: str):
        """
        :param str bucket: The name of the bucket.
        :param str domain: The domain associated with the entity, if any.
        :param str email: The email address associated with the entity, if any.
        :param str entity: The entity holding the permission, in one of the forms
               user-userId, user-email, group-groupId, group-email, domain-domain,
               project-team-projectId, allUsers, or allAuthenticatedUsers.
        :param str entity_id: The ID for the entity, if any.
        :param str etag: HTTP 1.1 Entity tag for the access-control entry.
        :param str generation: The content generation of the object, if applied to
               an object.
        :param str kind: The kind of item this is; for object access control
               entries, always storage#objectAccessControl.
        :param str object: The name of the object, if applied to an object.
        :param 'ObjectAccessControlProjectTeamResponse' project_team: The project
               team associated with the entity, if any.
        :param str role: The access permission for the entity.
        :param str self_link: The link to this access-control entry.
        """
        # All fields are stored uniformly via pulumi.set.
        for attr, value in (
            ("bucket", bucket),
            ("domain", domain),
            ("email", email),
            ("entity", entity),
            ("entity_id", entity_id),
            ("etag", etag),
            ("generation", generation),
            ("kind", kind),
            ("object", object),
            ("project_team", project_team),
            ("role", role),
            ("self_link", self_link),
        ):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def bucket(self) -> str:
        """The name of the bucket."""
        return pulumi.get(self, "bucket")

    @property
    @pulumi.getter
    def domain(self) -> str:
        """The domain associated with the entity, if any."""
        return pulumi.get(self, "domain")

    @property
    @pulumi.getter
    def email(self) -> str:
        """The email address associated with the entity, if any."""
        return pulumi.get(self, "email")

    @property
    @pulumi.getter
    def entity(self) -> str:
        """
        The entity holding the permission: user-userId, user-email,
        group-groupId, group-email, domain-domain, project-team-projectId,
        allUsers, or allAuthenticatedUsers.
        """
        return pulumi.get(self, "entity")

    @property
    @pulumi.getter(name="entityId")
    def entity_id(self) -> str:
        """The ID for the entity, if any."""
        return pulumi.get(self, "entity_id")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """HTTP 1.1 Entity tag for the access-control entry."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def generation(self) -> str:
        """The content generation of the object, if applied to an object."""
        return pulumi.get(self, "generation")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """The kind of item; always storage#objectAccessControl for these entries."""
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def object(self) -> str:
        """The name of the object, if applied to an object."""
        return pulumi.get(self, "object")

    @property
    @pulumi.getter(name="projectTeam")
    def project_team(self) -> 'outputs.ObjectAccessControlProjectTeamResponse':
        """The project team associated with the entity, if any."""
        return pulumi.get(self, "project_team")

    @property
    @pulumi.getter
    def role(self) -> str:
        """The access permission for the entity."""
        return pulumi.get(self, "role")

    @property
    @pulumi.getter(name="selfLink")
    def self_link(self) -> str:
        """The link to this access-control entry."""
        return pulumi.get(self, "self_link")
@pulumi.output_type
class ObjectIamPolicyBindingsItemResponse(dict):
    """A single IAM binding (condition, members, role) on an object policy."""

    def __init__(__self__, *,
                 condition: 'outputs.ExprResponse',
                 members: Sequence[str],
                 role: str):
        """
        :param 'ExprResponse' condition: The condition associated with this binding.
               An unsatisfied condition does not allow user access via this binding;
               different bindings (including their conditions) are examined
               independently.
        :param Sequence[str] members: Identifiers of members who may assume the role,
               e.g. allUsers, allAuthenticatedUsers, user:emailid,
               serviceAccount:emailid, group:emailid, domain:domain,
               projectOwner:projectid, projectEditor:projectid,
               projectViewer:projectid.
        :param str role: The role to which members belong, in the form
               roles/storage.specificRole. Both new IAM roles (e.g.
               roles/storage.admin, roles/storage.objectViewer) and legacy
               ACL-equivalent roles (e.g. roles/storage.legacyObjectReader,
               roles/storage.legacyBucketOwner) are supported.
        """
        pulumi.set(__self__, "condition", condition)
        pulumi.set(__self__, "members", members)
        pulumi.set(__self__, "role", role)

    @property
    @pulumi.getter
    def condition(self) -> 'outputs.ExprResponse':
        """
        The binding's condition; an unsatisfied condition does not allow access
        via this binding. Bindings and their conditions are examined independently.
        """
        return pulumi.get(self, "condition")

    @property
    @pulumi.getter
    def members(self) -> Sequence[str]:
        """
        Identifiers of the members granted the role: allUsers,
        allAuthenticatedUsers, user:emailid, serviceAccount:emailid,
        group:emailid, domain:domain, or projectOwner/Editor/Viewer:projectid.
        """
        return pulumi.get(self, "members")

    @property
    @pulumi.getter
    def role(self) -> str:
        """
        The role the members belong to, in the form roles/storage.specificRole
        (new IAM roles or legacy ACL-equivalent roles).
        """
        return pulumi.get(self, "role")
| 44.802624
| 607
| 0.665058
| 9,381
| 78,539
| 5.430658
| 0.064279
| 0.01429
| 0.02118
| 0.030955
| 0.800157
| 0.77356
| 0.768711
| 0.737383
| 0.728982
| 0.722799
| 0
| 0.002791
| 0.251837
| 78,539
| 1,752
| 608
| 44.828196
| 0.862985
| 0.456092
| 0
| 0.639698
| 1
| 0.022654
| 0.195576
| 0.070439
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186624
| false
| 0
| 0.006472
| 0
| 0.357066
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e6a7ebf142ab7f218f1af4a8b407ee1642cea30
| 8,536
|
py
|
Python
|
tests/st/ops/gpu/test_fake_quant_perlayer_grad.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/st/ops/gpu/test_fake_quant_perlayer_grad.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/st/ops/gpu/test_fake_quant_perlayer_grad.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import Tensor
import mindspore.nn as nn
import mindspore.context as context
from mindspore.ops.operations import _quant_ops as Q
# Run every test in this file in PyNATIVE (eager) mode on a GPU device.
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
class Net(nn.Cell):
    """Thin cell wrapping the FakeQuantPerLayerGrad operator under test."""

    def __init__(self, num_bits=8, narrow_range=False):
        super(Net, self).__init__()
        self.grad_op = Q.FakeQuantPerLayerGrad(num_bits=num_bits, narrow_range=narrow_range)

    def construct(self, dout, x, minq, maxq):
        # dout: incoming gradient; x: forward input; minq/maxq: quantization range.
        return self.grad_op(dout, x, minq, maxq)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad1():
    """WithArgsGradient RegularRange."""
    grad_in = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.26, -0.25, -0.24, 0.0, 63.5, 63.6]).astype(np.float32)
    low = np.array([-0.125]).reshape(1).astype(np.float32)
    high = np.array([63.625]).reshape(1).astype(np.float32)
    # Gradient passes through inside the quantization range, zero outside.
    keep = np.array([False, True, True, True, True, False])
    expect = np.where(keep, grad_in, 0.0).astype(np.float32)
    net = Net(num_bits=8, narrow_range=False)
    output = net(Tensor(grad_in), Tensor(data), Tensor(low), Tensor(high))
    diff = np.abs(output.asnumpy().flatten() - expect)
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(diff < 1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad2():
    """WithArgsGradient NarrowRange."""
    grad_in = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.26, -0.25, -0.24, 0.0, 63.25, 63.3]).astype(np.float32)
    low = np.array([-0.125]).reshape(1).astype(np.float32)
    high = np.array([63.375]).reshape(1).astype(np.float32)
    # Gradient passes through inside the quantization range, zero outside.
    keep = np.array([False, True, True, True, True, False])
    expect = np.where(keep, grad_in, 0.0).astype(np.float32)
    net = Net(num_bits=8, narrow_range=True)
    output = net(Tensor(grad_in), Tensor(data), Tensor(low), Tensor(high))
    diff = np.abs(output.asnumpy().flatten() - expect)
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(diff < 1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad3():
    """WithArgsGradient_4Bits_RegularRange."""
    grad_in = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.6, -0.5, -0.4, 0.0, 7.0, 7.1]).astype(np.float32)
    low = np.array([-0.4]).reshape(1).astype(np.float32)
    high = np.array([7.1]).reshape(1).astype(np.float32)
    # Gradient passes through inside the quantization range, zero outside.
    keep = np.array([False, True, True, True, True, False])
    expect = np.where(keep, grad_in, 0.0).astype(np.float32)
    net = Net(num_bits=4, narrow_range=False)
    output = net(Tensor(grad_in), Tensor(data), Tensor(low), Tensor(high))
    diff = np.abs(output.asnumpy().flatten() - expect)
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(diff < 1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad4():
    """WithArgsGradient_4Bits_NarrowRange."""
    grad_in = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.6, -0.5, -0.4, 0.0, 6.5, 6.6]).astype(np.float32)
    low = np.array([-0.4]).reshape(1).astype(np.float32)
    high = np.array([6.6]).reshape(1).astype(np.float32)
    # Gradient passes through inside the quantization range, zero outside.
    keep = np.array([False, True, True, True, True, False])
    expect = np.where(keep, grad_in, 0.0).astype(np.float32)
    net = Net(num_bits=4, narrow_range=True)
    output = net(Tensor(grad_in), Tensor(data), Tensor(low), Tensor(high))
    diff = np.abs(output.asnumpy().flatten() - expect)
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(diff < 1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad5():
    """FakeQuantWithMinMaxVarsGradient."""
    grad_in = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).astype(np.float32)
    low = np.array([0.0]).reshape(1).astype(np.float32)
    high = np.array([0.0]).reshape(1).astype(np.float32)
    # Degenerate range (min == max == 0): the gradient passes through unchanged.
    expect = grad_in
    net = Net(num_bits=8, narrow_range=True)
    output = net(Tensor(grad_in), Tensor(data), Tensor(low), Tensor(high))
    diff = np.abs(output.asnumpy().flatten() - expect)
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(diff < 1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad6():
    """WithVarsGradient_RegularRange."""
    grad_in = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.26, -0.25, -0.24, 0.0, 63.5, 63.6]).astype(np.float32)
    low = np.array([-0.125]).reshape(1).astype(np.float32)
    high = np.array([63.625]).reshape(1).astype(np.float32)
    # Gradient passes through inside the quantization range, zero outside.
    keep = np.array([False, True, True, True, True, False])
    expect = np.where(keep, grad_in, 0.0).astype(np.float32)
    net = Net(num_bits=8, narrow_range=False)
    output = net(Tensor(grad_in), Tensor(data), Tensor(low), Tensor(high))
    diff = np.abs(output.asnumpy().flatten() - expect)
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(diff < 1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad7():
    """WithVarsGradient_NarrowRange."""
    grad_in = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.26, -0.25, -0.24, 0.0, 63.25, 63.3]).astype(np.float32)
    low = np.array([-0.125]).reshape(1).astype(np.float32)
    high = np.array([63.375]).reshape(1).astype(np.float32)
    # Gradient passes through inside the quantization range, zero outside.
    keep = np.array([False, True, True, True, True, False])
    expect = np.where(keep, grad_in, 0.0).astype(np.float32)
    net = Net(num_bits=8, narrow_range=True)
    output = net(Tensor(grad_in), Tensor(data), Tensor(low), Tensor(high))
    diff = np.abs(output.asnumpy().flatten() - expect)
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(diff < 1.0e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad8():
    """WithVarsGradient_4Bits_RegularRange: 4-bit regular range. The
    boundary-exceeding first and last inputs get a zero gradient."""
    sens = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.6, -0.5, -0.4, 0.0, 7.0, 7.1], dtype=np.float32)
    low = np.array([-0.4], dtype=np.float32).reshape(1)
    high = np.array([7.1], dtype=np.float32).reshape(1)
    expect = sens.copy()
    expect[0] = 0.0
    expect[-1] = 0.0
    net = Net(num_bits=4, narrow_range=False)
    output = net(Tensor(sens), Tensor(data), Tensor(low), Tensor(high))
    tolerance = np.full(expect.shape, 1.0e-5)
    diff = output.asnumpy().flatten() - expect
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(np.abs(diff) < tolerance)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_fake_quant_grad9():
    """WithVarsGradient_4Bits_NarrowRange: 4-bit narrow range. Out-of-range
    inputs (positions 0 and 5) must receive a zero gradient."""
    sens = np.random.uniform(-1, 1, size=[6]).astype('float32')
    data = np.array([-0.6, -0.5, -0.4, 0.0, 6.5, 6.6], dtype=np.float32)
    low = np.array([-0.4], dtype=np.float32).reshape(1)
    high = np.array([6.6], dtype=np.float32).reshape(1)
    expect = sens.copy()
    expect[0] = 0.0
    expect[-1] = 0.0
    net = Net(num_bits=4, narrow_range=True)
    output = net(Tensor(sens), Tensor(data), Tensor(low), Tensor(high))
    tolerance = np.full(expect.shape, 1.0e-5)
    diff = output.asnumpy().flatten() - expect
    print("output: ", output)
    print("expect: ", expect)
    assert np.all(np.abs(diff) < tolerance)
| 38.45045
| 88
| 0.664011
| 1,376
| 8,536
| 4.013081
| 0.121366
| 0.013401
| 0.095074
| 0.05795
| 0.815103
| 0.815103
| 0.80967
| 0.80967
| 0.806411
| 0.805867
| 0
| 0.064339
| 0.147844
| 8,536
| 221
| 89
| 38.624434
| 0.694803
| 0.10895
| 0
| 0.834395
| 0
| 0
| 0.027697
| 0
| 0
| 0
| 0
| 0
| 0.057325
| 1
| 0.070064
| false
| 0
| 0.038217
| 0.006369
| 0.121019
| 0.11465
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e6ef865780037fc8341d168eb1e6a094f5ab0fd
| 25,171
|
py
|
Python
|
tests/test_vectors.py
|
ashcherbakov/peer-did-python
|
3d29c6a7935c0398fd0f9e353a614bc83dc514a0
|
[
"Apache-2.0"
] | 3
|
2021-09-04T19:31:12.000Z
|
2022-01-28T12:51:27.000Z
|
tests/test_vectors.py
|
ashcherbakov/peer-did-python
|
3d29c6a7935c0398fd0f9e353a614bc83dc514a0
|
[
"Apache-2.0"
] | 1
|
2021-09-03T07:23:12.000Z
|
2021-09-03T07:23:12.000Z
|
tests/test_vectors.py
|
ashcherbakov/peer-did-python
|
3d29c6a7935c0398fd0f9e353a614bc83dc514a0
|
[
"Apache-2.0"
] | 3
|
2021-08-02T12:56:46.000Z
|
2021-09-28T09:18:37.000Z
|
# ---------------------------------------------------------------------------
# Test vectors: did:peer numalgo 0 (single inception key).
# NOTE(review): the "O" in the DID_DOC_NUMALGO_O_* names below is the capital
# letter O, not the digit zero -- kept as-is so existing importers keep working.
# ---------------------------------------------------------------------------
PEER_DID_NUMALGO_0 = "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"

# Expected DID document for PEER_DID_NUMALGO_0 with the verification method
# rendered as Ed25519VerificationKey2018 / publicKeyBase58.
DID_DOC_NUMALGO_O_BASE58 = """
{
"id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"authentication": [
{
"id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "Ed25519VerificationKey2018",
"controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"publicKeyBase58": "ByHnpUCFb1vAfh9CFZ8ZkmUZguURW8nSw889hy6rD8L7"
}
]
}
"""
# Same document rendered as Ed25519VerificationKey2020 / publicKeyMultibase.
DID_DOC_NUMALGO_O_MULTIBASE = """
{
"id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"authentication": [
{
"id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "Ed25519VerificationKey2020",
"controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
}
]
}
"""
# Same document rendered as JsonWebKey2020 / publicKeyJwk (OKP/Ed25519).
DID_DOC_NUMALGO_O_JWK = """
{
"id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"authentication": [
{
"id": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "JsonWebKey2020",
"controller": "did:peer:0z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"publicKeyJwk": {
"kty": "OKP",
"crv": "Ed25519",
"x": "owBhCbktDjkfS6PdQddT0D3yjSitaSysP3YimJ_YgmA"
}
}
]
}
"""
# ---------------------------------------------------------------------------
# Test vectors: did:peer numalgo 2 (multiple keys + service).
# Segments: .E = key agreement (encryption) key, .V = authentication
# (verification) key, .S = base64-encoded service definition.
# ---------------------------------------------------------------------------
PEER_DID_NUMALGO_2 = (
    "did:peer:2"
    + ".Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc"
    + ".Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
    + ".Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg"
    + ".SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0"
)
# Expected DID document for PEER_DID_NUMALGO_2, base58 key encoding.
DID_DOC_NUMALGO_2_BASE58 = """
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"authentication": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "Ed25519VerificationKey2018",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyBase58": "ByHnpUCFb1vAfh9CFZ8ZkmUZguURW8nSw889hy6rD8L7"
},
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",
"type": "Ed25519VerificationKey2018",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyBase58": "3M5RCDjPTWPkKSN3sxUmmMqHbmRPegYP1tjcKyrDbt9J"
}
],
"keyAgreement": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",
"type": "X25519KeyAgreementKey2019",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyBase58": "JhNWeSVLMYccCk7iopQW4guaSJTojqpMEELgSLhKwRr"
}
],
"service": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0",
"type": "DIDCommMessaging",
"serviceEndpoint": "https://example.com/endpoint",
"routingKeys": [
"did:example:somemediator#somekey"
],
"accept": [
"didcomm/v2", "didcomm/aip2;env=rfc587"
]
}
]
}
"""
# Same DID document with multibase key encoding (…Key2020 types).
DID_DOC_NUMALGO_2_MULTIBASE = """
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"authentication": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "Ed25519VerificationKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
},
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",
"type": "Ed25519VerificationKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyMultibase": "z6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg"
}
],
"keyAgreement": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",
"type": "X25519KeyAgreementKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyMultibase": "z6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc"
}
],
"service": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0",
"type": "DIDCommMessaging",
"serviceEndpoint": "https://example.com/endpoint",
"routingKeys": [
"did:example:somemediator#somekey"
],
"accept": [
"didcomm/v2", "didcomm/aip2;env=rfc587"
]
}
]
}
"""
# Same DID document with JWK key encoding (JsonWebKey2020 types).
DID_DOC_NUMALGO_2_JWK = """
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"authentication": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "JsonWebKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyJwk": {
"kty": "OKP",
"crv": "Ed25519",
"x": "owBhCbktDjkfS6PdQddT0D3yjSitaSysP3YimJ_YgmA"
}
},
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",
"type": "JsonWebKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyJwk": {
"kty": "OKP",
"crv": "Ed25519",
"x": "Itv8B__b1-Jos3LCpUe8EdTFGTCa_Dza6_3848P3R70"
}
}
],
"keyAgreement": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",
"type": "JsonWebKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0",
"publicKeyJwk": {
"kty": "OKP",
"crv": "X25519",
"x": "BIiFcQEn3dfvB2pjlhOQQour6jXy9d5s2FKEJNTOJik"
}
}
],
"service": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCIsInIiOlsiZGlkOmV4YW1wbGU6c29tZW1lZGlhdG9yI3NvbWVrZXkiXSwiYSI6WyJkaWRjb21tL3YyIiwiZGlkY29tbS9haXAyO2Vudj1yZmM1ODciXX0#didcommmessaging-0",
"type": "DIDCommMessaging",
"serviceEndpoint": "https://example.com/endpoint",
"routingKeys": [
"did:example:somemediator#somekey"
],
"accept": [
"didcomm/v2", "didcomm/aip2;env=rfc587"
]
}
]
}
"""
# Numalgo-2 DID whose .S segment encodes a JSON *array* of two services.
PEER_DID_NUMALGO_2_2_SERVICES = (
    "did:peer:2"
    + ".Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud"
    + ".Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
    + ".SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0"
)
# Expected DID document: two service entries with per-type index suffixes
# ("#didcommmessaging-0", "#example-1").
DID_DOC_NUMALGO_2_MULTIBASE_2_SERVICES = """
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0",
"authentication": [
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "Ed25519VerificationKey2020",
"controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0",
"publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
}
],
"keyAgreement": [
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud",
"type": "X25519KeyAgreementKey2020",
"controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0",
"publicKeyMultibase": "z6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud"
}
],
"service": [
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#didcommmessaging-0",
"type": "DIDCommMessaging",
"serviceEndpoint": "https://example.com/endpoint",
"routingKeys": [
"did:example:somemediator#somekey"
]
},
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.SW3sidCI6ImRtIiwicyI6Imh0dHBzOi8vZXhhbXBsZS5jb20vZW5kcG9pbnQiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5Il19LHsidCI6ImV4YW1wbGUiLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludDIiLCJyIjpbImRpZDpleGFtcGxlOnNvbWVtZWRpYXRvciNzb21la2V5MiJdLCJhIjpbImRpZGNvbW0vdjIiLCJkaWRjb21tL2FpcDI7ZW52PXJmYzU4NyJdfV0#example-1",
"type": "example",
"serviceEndpoint": "https://example.com/endpoint2",
"routingKeys": [
"did:example:somemediator#somekey2"
],
"accept": ["didcomm/v2", "didcomm/aip2;env=rfc587"]
}
]
}
"""
# Numalgo-2 DID without any .S (service) segment.
PEER_DID_NUMALGO_2_NO_SERVICES = (
    "did:peer:2"
    + ".Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud"
    + ".Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
)
# Expected DID document: no "service" section at all.
DID_DOC_NUMALGO_2_MULTIBASE_NO_SERVICES = """
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"authentication": [
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "Ed25519VerificationKey2020",
"controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
}
],
"keyAgreement": [
{
"id": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V#6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud",
"type": "X25519KeyAgreementKey2020",
"controller": "did:peer:2.Ez6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"publicKeyMultibase": "z6LSpSrLxbAhg2SHwKk7kwpsH7DM7QjFS5iK6qP87eViohud"
}
]
}
"""
# Numalgo-2 DID whose .S segment carries only type ("t") and endpoint ("s"),
# i.e. no routingKeys and no accept list.
PEER_DID_NUMALGO_2_MINIMAL_SERVICES = "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9"
# Expected DID document: single DIDCommMessaging service, minimal fields.
DID_DOC_NUMALGO_2_MULTIBASE_MINIMAL_SERVICES = """
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",
"authentication": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V",
"type": "Ed25519VerificationKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",
"publicKeyMultibase": "z6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V"
},
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg",
"type": "Ed25519VerificationKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",
"publicKeyMultibase": "z6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg"
}
],
"keyAgreement": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc",
"type": "X25519KeyAgreementKey2020",
"controller": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9",
"publicKeyMultibase": "z6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc"
}
],
"service": [
{
"id": "did:peer:2.Ez6LSbysY2xFMRpGMhb7tFTLMpeuPRaqaWM1yECx2AtzE3KCc.Vz6MkqRYqQiSgvZQdnBytw86Qbs2ZWUkGv22od935YF4s8M7V.Vz6MkgoLTnTypo3tDRwCkZXSccTPHRLhF4ZnjhueYAFpEX6vg.SeyJ0IjoiZG0iLCJzIjoiaHR0cHM6Ly9leGFtcGxlLmNvbS9lbmRwb2ludCJ9#didcommmessaging-0",
"serviceEndpoint": "https://example.com/endpoint",
"type": "DIDCommMessaging"
}
]
}
"""
| 82.799342
| 492
| 0.755314
| 724
| 25,171
| 26.164365
| 0.107735
| 0.021433
| 0.020271
| 0.102307
| 0.949322
| 0.942406
| 0.908145
| 0.761548
| 0.68912
| 0.584174
| 0
| 0.136005
| 0.188232
| 25,171
| 303
| 493
| 83.072607
| 0.791073
| 0
| 0
| 0.551724
| 0
| 0.013793
| 0.974852
| 0.714115
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e7ad5c90e2f6bdf05b4794fcb9b90dd4e3c3069
| 17,989
|
py
|
Python
|
modern_logic_client/api/executions_api.py
|
latourette359/modern_logic_client
|
16d415e1b07a66a975dc08a67465c0d70c90cbac
|
[
"MIT"
] | null | null | null |
modern_logic_client/api/executions_api.py
|
latourette359/modern_logic_client
|
16d415e1b07a66a975dc08a67465c0d70c90cbac
|
[
"MIT"
] | null | null | null |
modern_logic_client/api/executions_api.py
|
latourette359/modern_logic_client
|
16d415e1b07a66a975dc08a67465c0d70c90cbac
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Modern Logic Api
Manage and version your customer decision logic outside of your codebase # noqa: E501
OpenAPI spec version: 1.0.0
Contact: info@usemodernlogic.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from modern_logic_client.api_client import ApiClient
class ExecutionsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def customer_customer_id_executions_get(self, customer_id, **kwargs): # noqa: E501
"""List Customer Executions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.customer_customer_id_executions_get(customer_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: Customer id that the client supplied (required)
:param int page_size: Number of elements to return (default is 10)
:param int page_number: Lists are ordered by creation date ascending. To return the first page, set pageNumber to zero
:return: InlineResponse2004
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.customer_customer_id_executions_get_with_http_info(customer_id, **kwargs) # noqa: E501
else:
(data) = self.customer_customer_id_executions_get_with_http_info(customer_id, **kwargs) # noqa: E501
return data
def customer_customer_id_executions_get_with_http_info(self, customer_id, **kwargs):  # noqa: E501
    """List Customer Executions  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.customer_customer_id_executions_get_with_http_info(customer_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str customer_id: Customer id that the client supplied (required)
    :param int page_size: Number of elements to return (default is 10)
    :param int page_number: Lists are ordered by creation date ascending. To return the first page, set pageNumber to zero
    :return: InlineResponse2004
    If the method is called asynchronously,
    returns the request thread.
    """
    # Endpoint-specific parameters plus the framework-level options every
    # generated method accepts.
    all_params = ['customer_id', 'page_size', 'page_number']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE(review): the code below looks arguments up by name in the
    # locals() snapshot -- do not introduce new locals before this line.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        # Reject keyword arguments this endpoint does not understand.
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method customer_customer_id_executions_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'customer_id' is set
    if ('customer_id' not in params or
            params['customer_id'] is None):
        raise ValueError("Missing the required parameter `customer_id` when calling `customer_customer_id_executions_get`")  # noqa: E501

    collection_formats = {}

    # Path parameter fills {customerId} in the URL template.
    path_params = {}
    if 'customer_id' in params:
        path_params['customerId'] = params['customer_id']  # noqa: E501

    # Optional pagination query parameters.
    query_params = []
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'page_number' in params:
        query_params.append(('pageNumber', params['page_number']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/customer/{customerId}/executions', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='InlineResponse2004',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def execution_execution_id_get(self, execution_id, **kwargs): # noqa: E501
"""Get Execution Details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execution_execution_id_get(execution_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int execution_id: Execution id (required)
:return: Execution
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.execution_execution_id_get_with_http_info(execution_id, **kwargs) # noqa: E501
else:
(data) = self.execution_execution_id_get_with_http_info(execution_id, **kwargs) # noqa: E501
return data
def execution_execution_id_get_with_http_info(self, execution_id, **kwargs):  # noqa: E501
    """Get Execution Details  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.execution_execution_id_get_with_http_info(execution_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int execution_id: Execution id (required)
    :return: Execution
    If the method is called asynchronously,
    returns the request thread.
    """
    # Endpoint-specific parameters plus the framework-level options every
    # generated method accepts.
    all_params = ['execution_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE(review): the code below looks arguments up by name in the
    # locals() snapshot -- do not introduce new locals before this line.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        # Reject keyword arguments this endpoint does not understand.
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method execution_execution_id_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'execution_id' is set
    if ('execution_id' not in params or
            params['execution_id'] is None):
        raise ValueError("Missing the required parameter `execution_id` when calling `execution_execution_id_get`")  # noqa: E501

    collection_formats = {}

    # Path parameter fills {executionId} in the URL template.
    path_params = {}
    if 'execution_id' in params:
        path_params['executionId'] = params['execution_id']  # noqa: E501

    query_params = []

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/execution/{executionId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Execution',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def execution_execution_id_resume_post(self, body, execution_id, **kwargs):  # noqa: E501
    """Resume Execution  # noqa: E501

    Convenience wrapper around
    ``execution_execution_id_resume_post_with_http_info`` that returns the
    deserialized response data directly (the HTTP envelope is stripped).
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.execution_execution_id_resume_post(body, execution_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param dict(str, object) body: Execution Information (required)
    :param int execution_id: execution id (required)
    :return: WorkflowExecutionResult
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want the data, never the full
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether the delegate returns deserialized data (sync) or a request
    # thread (async_req=True), its return value is exactly what this
    # wrapper should hand back — no branching needed.
    return self.execution_execution_id_resume_post_with_http_info(
        body, execution_id, **kwargs)  # noqa: E501
def execution_execution_id_resume_post_with_http_info(self, body, execution_id, **kwargs):  # noqa: E501
    """Resume Execution  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.execution_execution_id_resume_post_with_http_info(body, execution_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param dict(str, object) body: Execution Information (required)
    :param int execution_id: execution id (required)
    :return: WorkflowExecutionResult
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted in addition to the required positionals.
    accepted = [
        'body',
        'execution_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method execution_execution_id_resume_post" % key
            )

    # Both positionals are required and must not be None.
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `execution_execution_id_resume_post`")  # noqa: E501
    if execution_id is None:
        raise ValueError("Missing the required parameter `execution_id` when calling `execution_execution_id_resume_post`")  # noqa: E501

    collection_formats = {}
    # The execution id is interpolated into the URL path.
    path_params = {'executionId': execution_id}  # noqa: E501
    query_params = []
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = body

    # Authentication setting
    auth_settings = ['bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/execution/{executionId}/resume', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='WorkflowExecutionResult',  # noqa: E501
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats)
def execution_get(self, **kwargs):  # noqa: E501
    """List executions  # noqa: E501

    Convenience wrapper around ``execution_get_with_http_info`` that
    returns the deserialized response data directly (the HTTP envelope
    is stripped). This method makes a synchronous HTTP request by
    default. To make an asynchronous HTTP request, please pass
    async_req=True

    >>> thread = api.execution_get(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int page_size: Number of elements to return (default is 10)
    :param int page_number: Lists are ordered by creation date ascending. To return the first page, set pageNumber to zero
    :param str alert_type: The alert status of this execution
    :param date before: Filter executions to those that occurred before the given date.
    :param date after: Filter executions to those that occurred after the given date.
    :return: InlineResponse2004
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want the data, never the full
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # Whether the delegate returns deserialized data (sync) or a request
    # thread (async_req=True), its return value is exactly what this
    # wrapper should hand back — no branching needed.
    return self.execution_get_with_http_info(**kwargs)  # noqa: E501
def execution_get_with_http_info(self, **kwargs):  # noqa: E501
    """List executions  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.execution_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int page_size: Number of elements to return (default is 10)
    :param int page_number: Lists are ordered by creation date ascending. To return the first page, set pageNumber to zero
    :param str alert_type: The alert status of this execution
    :param date before: Filter executions to those that occurred before the given date.
    :param date after: Filter executions to those that occurred after the given date.
    :return: InlineResponse2004
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint accepts.
    accepted = [
        'page_size',
        'page_number',
        'alert_type',
        'before',
        'after',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method execution_get" % key
            )

    collection_formats = {}
    path_params = {}
    # Map python_style argument names onto the camelCase query parameters
    # the API expects, including only filters the caller actually supplied.
    query_params = []
    for py_name, api_name in (('page_size', 'pageSize'),
                              ('page_number', 'pageNumber'),
                              ('alert_type', 'alertType'),
                              ('before', 'before'),
                              ('after', 'after')):
        if py_name in kwargs:
            query_params.append((api_name, kwargs[py_name]))  # noqa: E501

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = ['bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/execution', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='InlineResponse2004',  # noqa: E501
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats)
| 41.070776
| 141
| 0.628551
| 2,094
| 17,989
| 5.142789
| 0.090258
| 0.042344
| 0.031572
| 0.026743
| 0.912434
| 0.887826
| 0.88179
| 0.868604
| 0.851054
| 0.833225
| 0
| 0.01648
| 0.284896
| 17,989
| 437
| 142
| 41.16476
| 0.820662
| 0.347101
| 0
| 0.733906
| 0
| 0
| 0.19832
| 0.056017
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038627
| false
| 0
| 0.017167
| 0
| 0.111588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
21a8332c1aa09d21c074e79724a7b24e745bbbab
| 45,702
|
py
|
Python
|
devilry/devilry_admin/tests/assignment/students/test_create_groups_accumulated_score.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | null | null | null |
devilry/devilry_admin/tests/assignment/students/test_create_groups_accumulated_score.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | null | null | null |
devilry/devilry_admin/tests/assignment/students/test_create_groups_accumulated_score.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | null | null | null |
import mock
from django import test
from django.conf import settings
from django.contrib import messages
from django.http import Http404
from django_cradmin import cradmin_testhelpers
from django.contrib.contenttypes.models import ContentType
from model_mommy import mommy
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_group import devilry_group_mommy_factories as group_mommy
from devilry.apps.core.models import AssignmentGroup, Candidate, Assignment
from devilry.devilry_admin.views.assignment.students.create_groups_accumulated_score import \
PreviewRelatedstudentsListView, SelectAssignmentsView
class TestAccumulatedScoreSelectAssignmentsView(test.TestCase, cradmin_testhelpers.TestCaseMixin):
    """Tests for ``SelectAssignmentsView``.

    This is the first step of the "create groups from accumulated score"
    flow: the admin selects source assignments from the same period and a
    points threshold. The selection is persisted in the Django session under
    the keys ``selected_assignment_ids``, ``points_threshold`` and
    ``from_select_assignment_view``.
    """
    # View under test, consumed by cradmin_testhelpers.TestCaseMixin.
    viewclass = SelectAssignmentsView

    def setUp(self):
        # The denormalized AssignmentGroup cache triggers must be installed
        # before any groups/feedbacksets are created in the tests.
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_no_assignments(self):
        """The current assignment alone yields an empty assignment list."""
        test_assignment = mommy.make('core.Assignment')
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=test_assignment
        )
        self.assertFalse(mockresponse.selector.exists('.django-cradmin-listbuilder-itemvalue'))

    def test_no_assignments_on_same_period(self):
        """Assignments on other periods are not listed."""
        test_period = mommy.make('core.Period')
        test_assignment1 = mommy.make('core.Assignment', parentnode=test_period)
        # Assignment on an unrelated period — must not appear.
        mommy.make('core.Assignment')
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=test_assignment1
        )
        self.assertFalse(mockresponse.selector.exists('.django-cradmin-listbuilder-itemvalue'))

    def test_assignment_info(self):
        """Each listed assignment renders its long name, max points and grading plugin."""
        current_assignment = mommy.make('core.Assignment')
        mommy.make('core.Assignment', long_name='Test Assignment', max_points=123,
                   parentnode=current_assignment.parentnode)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=current_assignment
        )
        selector = mockresponse.selector
        self.assertEqual(
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized,
            'Test Assignment')
        self.assertEqual(
            selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-description').alltext_normalized,
            'Max points: 123 Grading plugin: Passed/failed')

    def test_assignments_multiple(self):
        """All other assignments on the period are listed."""
        current_assignment = mommy.make('core.Assignment')
        test_assignment1 = mommy.make('core.Assignment', long_name='Test Assignment 1', max_points=123,
                                      parentnode=current_assignment.parentnode)
        test_assignment2 = mommy.make('core.Assignment', long_name='Test Assignment 2', max_points=123,
                                      parentnode=current_assignment.parentnode)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=current_assignment
        )
        selector = mockresponse.selector
        assignment_names = [element.alltext_normalized for element in
                            selector.list('.django-cradmin-listbuilder-itemvalue-titledescription-title')]
        self.assertEqual(len(assignment_names), 2)
        self.assertIn(test_assignment1.long_name, assignment_names)
        self.assertIn(test_assignment2.long_name, assignment_names)

    def test_session_data_cleared(self):
        """A GET clears any stale wizard data left in the session."""
        current_assignment = mommy.make('core.Assignment')
        test_assignment1 = mommy.make('core.Assignment', long_name='Test Assignment 1', max_points=123,
                                      parentnode=current_assignment.parentnode)
        test_assignment2 = mommy.make('core.Assignment', long_name='Test Assignment 2', max_points=123,
                                      parentnode=current_assignment.parentnode)
        session = self.client.session
        session['selected_assignment_ids'] = [125, 312]
        session['from_select_assignment_view'] = ''
        session['points_threshold'] = 512
        session.save()
        self.assertEqual(len(list(self.client.session.keys())), 3)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=current_assignment,
            sessionmock=self.client.session)
        # All three wizard keys were removed by the view.
        self.assertEqual(len(list(mockresponse.request.session.keys())), 0)

    def test_post_session_data_set(self):
        """A valid POST stores the selection and threshold in the session."""
        current_assignment = mommy.make('core.Assignment')
        test_assignment1 = mommy.make('core.Assignment', long_name='Test Assignment 1', max_points=123,
                                      parentnode=current_assignment.parentnode)
        test_assignment2 = mommy.make('core.Assignment', long_name='Test Assignment 2', max_points=123,
                                      parentnode=current_assignment.parentnode)
        self.assertEqual(len(list(self.client.session.keys())), 0)
        mockresponse = self.mock_http302_postrequest(
            cradmin_role=current_assignment,
            sessionmock=self.client.session,
            requestkwargs={
                'data': {
                    'selected_items': [test_assignment1.id, test_assignment2.id],
                    'points_threshold': 123
                }
            })
        self.assertEqual(len(list(mockresponse.request.session.keys())), 3)
        self.assertEqual(mockresponse.request.session['from_select_assignment_view'], '')
        self.assertEqual(mockresponse.request.session['points_threshold'], 123)
        self.assertIn(test_assignment1.id, mockresponse.request.session['selected_assignment_ids'])
        self.assertIn(test_assignment2.id, mockresponse.request.session['selected_assignment_ids'])

    def test_session_data_cleared_and_set_again(self):
        """A POST replaces pre-existing wizard session data with the new selection."""
        current_assignment = mommy.make('core.Assignment')
        test_assignment1 = mommy.make('core.Assignment', long_name='Test Assignment 1', max_points=123,
                                      parentnode=current_assignment.parentnode)
        test_assignment2 = mommy.make('core.Assignment', long_name='Test Assignment 2', max_points=123,
                                      parentnode=current_assignment.parentnode)
        # Seed the session with stale data from an earlier run.
        session = self.client.session
        session['selected_assignment_ids'] = [125, 312]
        session['from_select_assignment_view'] = ''
        session['points_threshold'] = 512
        session.save()
        self.assertEqual(len(list(self.client.session.keys())), 3)
        self.assertEqual(self.client.session['from_select_assignment_view'], '')
        self.assertEqual(self.client.session['points_threshold'], 512)
        self.assertIn(125, self.client.session['selected_assignment_ids'])
        self.assertIn(312, self.client.session['selected_assignment_ids'])
        mockresponse = self.mock_http302_postrequest(
            cradmin_role=current_assignment,
            sessionmock=self.client.session,
            requestkwargs={
                'data': {
                    'selected_items': [test_assignment1.id, test_assignment2.id],
                    'points_threshold': 123
                }
            })
        # Stale values are gone; only the freshly posted selection remains.
        self.assertEqual(len(list(mockresponse.request.session.keys())), 3)
        self.assertEqual(mockresponse.request.session['from_select_assignment_view'], '')
        self.assertEqual(mockresponse.request.session['points_threshold'], 123)
        self.assertIn(test_assignment1.id, mockresponse.request.session['selected_assignment_ids'])
        self.assertIn(test_assignment2.id, mockresponse.request.session['selected_assignment_ids'])

    def test_post_without_point_threshold(self):
        """POST without a points threshold re-renders with a form error and no session data."""
        current_assignment = mommy.make('core.Assignment')
        test_assignment1 = mommy.make('core.Assignment', long_name='Test Assignment 1', max_points=123,
                                      parentnode=current_assignment.parentnode)
        test_assignment2 = mommy.make('core.Assignment', long_name='Test Assignment 2', max_points=123,
                                      parentnode=current_assignment.parentnode)
        mockresponse = self.mock_http200_postrequest_htmls(
            cradmin_role=current_assignment,
            sessionmock=self.client.session,
            requestkwargs={
                'data': {
                    'selected_items': [test_assignment1.id, test_assignment2.id]
                }
            })
        self.assertEqual(len(list(self.client.session.keys())), 0)
        self.assertEqual(mockresponse.selector.one('#error_1_id_points_threshold').alltext_normalized,
                         'This field is required.')

    def test_post_without_selected_items(self):
        """POST without any selected assignments does not populate the session."""
        current_assignment = mommy.make('core.Assignment')
        mockresponse = self.mock_http200_postrequest_htmls(
            cradmin_role=current_assignment,
            sessionmock=self.client.session,
            requestkwargs={
                'data': {
                    'selected_items': [],
                    'points_threshold': 123
                }
            })
        self.assertEqual(len(list(self.client.session.keys())), 0)
class TestPreviewRelatedstudentsListView(test.TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = PreviewRelatedstudentsListView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_from_select_assignment_view_not_in_session(self):
test_assignment = mommy.make('core.Assignment')
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_role=test_assignment,
sessionmock={
'selected_assignment_ids': [],
'points_threshold': 10
})
def test_points_threshold_not_in_session(self):
test_assignment = mommy.make('core.Assignment')
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_role=test_assignment,
sessionmock={
'selected_assignment_ids': [],
'from_select_assignment_view': ''
})
def test_selected_assignment_ids_not_in_session(self):
test_assignment = mommy.make('core.Assignment')
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_role=test_assignment,
sessionmock={
'points_threshold': 123,
'from_select_assignment_view': ''
})
def test_ok(self):
current_assignment = mommy.make('core.Assignment')
test_assignment = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1')
self.mock_http200_getrequest_htmls(
cradmin_role=test_assignment,
sessionmock={
'points_threshold': 123,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment.id]
})
def test_selected_assignments_info_box(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1')
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 2')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'points_threshold': 123,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id]
})
self.assertEqual(
mockresponse.selector.one('.devilry-accumulated-score-selected-assignments').alltext_normalized,
'- Test Assignment 1 - Test Assignment 2')
def test_total_score_for_selected_assignments_info_box(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1', max_points=100)
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 2', max_points=150)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'points_threshold': 123,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id]
})
self.assertEqual(
mockresponse.selector.one(
'.devilry-accumulated-score-selected-assignments-total-max-score').alltext_normalized,
'Total max score of selected assignments: 250')
def test_threshold_percentage_of_max_score_info_box(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1', max_points=100)
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 2', max_points=150)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'points_threshold': 123,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id]
})
self.assertEqual(
mockresponse.selector.one(
'.devilry-accumulated-score-selected-assignments-threshold-percentage-of-max-score').alltext_normalized,
'Threshold percentage of max score: {:.2f} %'.format((123.0/250.0) * 100.0))
def test_single_assignment_single_student_not_passed_added_students_count_info_box(self):
current_assignment = mommy.make('core.Assignment')
test_assignment = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1')
relatedstudent = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment, grading_points=0)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'points_threshold': 1,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment.id]
})
self.assertEqual(
mockresponse.selector.one(
'.devilry-accumulated-score-selected-assignments-student-count').alltext_normalized,
'Number of students that will be added to the assignment: 0 / 1')
def test_single_assignment_single_student_passed_added_students_count_info_box(self):
current_assignment = mommy.make('core.Assignment')
test_assignment = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1')
relatedstudent = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment, grading_points=1)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'points_threshold': 1,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment.id]
})
self.assertEqual(
mockresponse.selector.one(
'.devilry-accumulated-score-selected-assignments-student-count').alltext_normalized,
'Number of students that will be added to the assignment: 1 / 1')
def test_single_assignment_multiple_students_added_students_count_info_box_sanity(self):
current_assignment = mommy.make('core.Assignment')
test_assignment = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1')
relatedstudent1 = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
relatedstudent2 = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
relatedstudent3 = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent1, assignment=test_assignment, grading_points=1)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent2, assignment=test_assignment, grading_points=1)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent3, assignment=test_assignment, grading_points=0)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'points_threshold': 1,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment.id]
})
self.assertEqual(
mockresponse.selector.one(
'.devilry-accumulated-score-selected-assignments-student-count').alltext_normalized,
'Number of students that will be added to the assignment: 2 / 3')
def test_multiple_assignment_multiple_students_added_students_count_info_box_sanity(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 1')
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode,
long_name='Test Assignment 2')
relatedstudent1 = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
relatedstudent2 = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
relatedstudent3 = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent1, assignment=test_assignment1, grading_points=1)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent1, assignment=test_assignment2, grading_points=1)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent2, assignment=test_assignment1, grading_points=1)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent2, assignment=test_assignment2, grading_points=1)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent3, assignment=test_assignment1, grading_points=1)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent3, assignment=test_assignment2, grading_points=0)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'points_threshold': 2,
'from_select_assignment_view': '',
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id]
})
self.assertEqual(
mockresponse.selector.one(
'.devilry-accumulated-score-selected-assignments-student-count').alltext_normalized,
'Number of students that will be added to the assignment: 2 / 3')
def __make_published_feedbackset_for_relatedstudent(self, relatedstudent, assignment, grading_points=0):
group = mommy.make('core.AssignmentGroup', parentnode=assignment)
group_mommy.feedbackset_first_attempt_published(group=group, grading_points=grading_points)
mommy.make('core.Candidate', assignment_group=group, relatedstudent=relatedstudent)
def test_single_assignment_student_has_enough_points_sanity(self):
current_assignment = mommy.make('core.Assignment')
test_assignment = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
relatedstudent = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment, grading_points=25)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'selected_assignment_ids': [test_assignment.id],
'points_threshold': 25,
'from_select_assignment_view': ''
})
self.assertEqual(mockresponse.selector.count('.django-cradmin-listbuilder-itemvalue'), 1)
def test_single_assignment_student_does_not_have_enough_points_sanity(self):
current_assignment = mommy.make('core.Assignment')
test_assignment = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
relatedstudent = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment, grading_points=20)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'selected_assignment_ids': [test_assignment.id],
'points_threshold': 25,
'from_select_assignment_view': ''
})
self.assertEqual(mockresponse.selector.count('.django-cradmin-listbuilder-itemvalue'), 0)
def test_multiple_assignments_student_has_enough_points_across_assignment_sanity(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
relatedstudent = mommy.make('core.RelatedStudent', period=current_assignment.parentnode)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment1, grading_points=25)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment2, grading_points=25)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id],
'points_threshold': 50,
'from_select_assignment_view': ''
})
self.assertEqual(mockresponse.selector.count('.django-cradmin-listbuilder-itemvalue'), 1)
def test_student_details(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
relatedstudent = mommy.make('core.RelatedStudent',
period=current_assignment.parentnode,
user__fullname='Test User',
user__shortname='testuser@example.com')
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment1, grading_points=25)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment2, grading_points=30)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id],
'points_threshold': 50,
'from_select_assignment_view': ''
})
selector = mockresponse.selector
self.assertEqual(
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-title').alltext_normalized,
'Test User (testuser@example.com)')
self.assertEqual(
selector.one('.django-cradmin-listbuilder-itemvalue-titledescription-description').alltext_normalized,
'Grading points total: 55')
def test_student_already_on_assignment_is_excluded(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
relatedstudent = mommy.make('core.RelatedStudent',
period=current_assignment.parentnode,
user__fullname='Test User 2',
user__shortname='testuser2@example.com')
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment1, grading_points=50)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment2, grading_points=50)
group = mommy.make('core.AssignmentGroup', parentnode=current_assignment)
mommy.make('core.Candidate', relatedstudent=relatedstudent,
assignment_group=group)
mommy.make('devilry_group.FeedbackSet', group=group)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id],
'points_threshold': 50,
'from_select_assignment_view': ''
})
self.assertNotContains(mockresponse.response, relatedstudent.user.fullname)
self.assertNotContains(mockresponse.response, relatedstudent.user.shortname)
def test_student_already_on_assignment_is_excluded_with_qualifying_student(self):
current_assignment = mommy.make('core.Assignment')
test_assignment1 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
test_assignment2 = mommy.make('core.Assignment', parentnode=current_assignment.parentnode, max_points=50)
relatedstudent = mommy.make('core.RelatedStudent',
period=current_assignment.parentnode,
user__fullname='Test User 1',
user__shortname='testuser1@example.com')
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment1, grading_points=25)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent, assignment=test_assignment2, grading_points=30)
relatedstudent_on_current_assignment = mommy.make('core.RelatedStudent',
period=current_assignment.parentnode,
user__fullname='Test User 2',
user__shortname='testuser2@example.com')
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent_on_current_assignment, assignment=test_assignment1, grading_points=50)
self.__make_published_feedbackset_for_relatedstudent(
relatedstudent=relatedstudent_on_current_assignment, assignment=test_assignment2, grading_points=50)
group = mommy.make('core.AssignmentGroup', parentnode=current_assignment)
mommy.make('core.Candidate', relatedstudent=relatedstudent_on_current_assignment,
assignment_group=group)
mommy.make('devilry_group.FeedbackSet', group=group)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=current_assignment,
sessionmock={
'selected_assignment_ids': [test_assignment1.id, test_assignment2.id],
'points_threshold': 50,
'from_select_assignment_view': ''
})
self.assertNotContains(mockresponse.response, relatedstudent_on_current_assignment.user.fullname)
self.assertNotContains(mockresponse.response, relatedstudent_on_current_assignment.user.shortname)
self.assertContains(mockresponse.response, relatedstudent.user.fullname)
self.assertContains(mockresponse.response, relatedstudent.user.shortname)
def test_post_success_message(self):
    """Posting the confirm form reports the number of students added."""
    current_assignment = mommy.make('core.Assignment', long_name='Current Assignment')
    source_assignment = mommy.make('core.Assignment',
                                   parentnode=current_assignment.parentnode,
                                   max_points=50)
    qualifying_student = mommy.make('core.RelatedStudent',
                                    period=current_assignment.parentnode,
                                    user__fullname='Test User',
                                    user__shortname='testuser@example.com')
    self.__make_published_feedbackset_for_relatedstudent(
        relatedstudent=qualifying_student, assignment=source_assignment,
        grading_points=25)
    messagesmock = mock.MagicMock()
    self.mock_http302_postrequest(
        cradmin_role=current_assignment,
        messagesmock=messagesmock,
        sessionmock={
            'selected_assignment_ids': [source_assignment.id],
            'points_threshold': 25,
            'from_select_assignment_view': ''
        },
        requestkwargs={'data': {'confirm': ''}})
    messagesmock.add.assert_called_once_with(
        messages.SUCCESS,
        '1 student(s) added to Current Assignment',
        '')
def test_post_one_student_group_created(self):
    """One qualifying student results in exactly one group with one candidate."""
    current_assignment = mommy.make('core.Assignment', long_name='Current Assignment')
    period = current_assignment.parentnode
    source_assignment1 = mommy.make('core.Assignment', parentnode=period, max_points=50)
    source_assignment2 = mommy.make('core.Assignment', parentnode=period, max_points=50)
    qualifying_student = mommy.make('core.RelatedStudent',
                                    period=period,
                                    user__fullname='Test User',
                                    user__shortname='testuser@example.com')
    # 25 + 25 meets the 50-point threshold across the two source assignments.
    for source_assignment in (source_assignment1, source_assignment2):
        self.__make_published_feedbackset_for_relatedstudent(
            relatedstudent=qualifying_student, assignment=source_assignment,
            grading_points=25)
    self.mock_http302_postrequest(
        cradmin_role=current_assignment,
        sessionmock={
            'selected_assignment_ids': [source_assignment1.id, source_assignment2.id],
            'points_threshold': 50,
            'from_select_assignment_view': ''
        },
        requestkwargs={'data': {'confirm': ''}})
    self.assertEqual(AssignmentGroup.objects.filter(parentnode=current_assignment).count(), 1)
    created_group = AssignmentGroup.objects.filter(parentnode=current_assignment).get()
    self.assertEqual(
        Candidate.objects.filter(assignment_group=created_group,
                                 relatedstudent=qualifying_student).count(),
        1)
def test_post_multiple_students_multiple_groups_created(self):
    """Each qualifying student gets a separate group with exactly one candidate."""
    current_assignment = mommy.make('core.Assignment', long_name='Current Assignment')
    period = current_assignment.parentnode
    source_assignment1 = mommy.make('core.Assignment', parentnode=period, max_points=50)
    source_assignment2 = mommy.make('core.Assignment', parentnode=period, max_points=50)
    student1 = mommy.make('core.RelatedStudent',
                          period=period,
                          user__fullname='Test User1',
                          user__shortname='testuser1@example.com')
    student2 = mommy.make('core.RelatedStudent',
                          period=period,
                          user__fullname='Test User2',
                          user__shortname='testuser2@example.com')
    # Both students reach the 50-point threshold (25 + 25).
    for student in (student1, student2):
        for source_assignment in (source_assignment1, source_assignment2):
            self.__make_published_feedbackset_for_relatedstudent(
                relatedstudent=student, assignment=source_assignment,
                grading_points=25)
    self.mock_http302_postrequest(
        cradmin_role=current_assignment,
        sessionmock={
            'selected_assignment_ids': [source_assignment1.id, source_assignment2.id],
            'points_threshold': 50,
            'from_select_assignment_view': ''
        },
        requestkwargs={'data': {'confirm': ''}})
    self.assertEqual(AssignmentGroup.objects.filter(parentnode=current_assignment).count(), 2)
    for student in (student1, student2):
        self.assertEqual(Candidate.objects.filter(
            relatedstudent=student,
            assignment_group__parentnode=current_assignment).count(), 1)
def test_post_student_already_on_assignment_is_excluded(self):
    """POSTing does not create a second candidate for a student already on the assignment."""
    current_assignment = mommy.make('core.Assignment')
    period = current_assignment.parentnode
    source_assignment1 = mommy.make('core.Assignment', parentnode=period, max_points=50)
    source_assignment2 = mommy.make('core.Assignment', parentnode=period, max_points=50)
    qualifying_student = mommy.make('core.RelatedStudent',
                                    period=period,
                                    user__fullname='Test User 1',
                                    user__shortname='testuser1@example.com')
    for source_assignment, points in ((source_assignment1, 25), (source_assignment2, 30)):
        self.__make_published_feedbackset_for_relatedstudent(
            relatedstudent=qualifying_student, assignment=source_assignment,
            grading_points=points)
    existing_student = mommy.make('core.RelatedStudent',
                                  period=period,
                                  user__fullname='Test User 2',
                                  user__shortname='testuser2@example.com')
    for source_assignment in (source_assignment1, source_assignment2):
        self.__make_published_feedbackset_for_relatedstudent(
            relatedstudent=existing_student, assignment=source_assignment,
            grading_points=50)
    # The second student is already a candidate on the current assignment.
    existing_group = mommy.make('core.AssignmentGroup', parentnode=current_assignment)
    mommy.make('core.Candidate', relatedstudent=existing_student,
               assignment_group=existing_group)
    mommy.make('devilry_group.FeedbackSet', group=existing_group)
    self.mock_http302_postrequest(
        cradmin_role=current_assignment,
        sessionmock={
            'selected_assignment_ids': [source_assignment1.id, source_assignment2.id],
            'points_threshold': 50,
            'from_select_assignment_view': ''
        },
        requestkwargs={'data': {'confirm': ''}})
    # Still only the pre-existing candidate -- no duplicate was created.
    self.assertEqual(
        Candidate.objects.filter(relatedstudent=existing_student,
                                 assignment_group__parentnode=current_assignment).count(),
        1
    )
def test_get_query_count_sanity(self):
    """The GET view should use a fixed number of queries independent of data size."""
    current_assignment = mommy.make('core.Assignment')
    period = current_assignment.parentnode
    source_assignments = [
        mommy.make('core.Assignment', parentnode=period, max_points=50)
        for _ in range(4)
    ]
    # Four students, each with published results on all four source assignments.
    grading_points_per_assignment = [25, 30, 30, 30]
    for _ in range(4):
        student = mommy.make('core.RelatedStudent', period=period)
        for source_assignment, points in zip(source_assignments,
                                             grading_points_per_assignment):
            self.__make_published_feedbackset_for_relatedstudent(
                relatedstudent=student, assignment=source_assignment,
                grading_points=points)
    requestuser = mommy.make(settings.AUTH_USER_MODEL)
    with self.assertNumQueries(5):
        self.mock_getrequest(
            requestuser=requestuser,
            cradmin_role=current_assignment,
            sessionmock={
                'selected_assignment_ids': [
                    source_assignment.id for source_assignment in source_assignments],
                'points_threshold': 50,
                'from_select_assignment_view': ''
            })
def test_post_query_count_sanity(self):
    """The POST should use a fixed number of queries independent of data size."""
    # Trigger ContentType caching so we do not get an extra lookup in the
    # assertNumQueries() statement below.
    ContentType.objects.get_for_model(Assignment)
    current_assignment = mommy.make('core.Assignment')
    period = current_assignment.parentnode
    source_assignments = [
        mommy.make('core.Assignment', parentnode=period, max_points=50)
        for _ in range(4)
    ]
    # Four students, all qualifying with full points on every source assignment.
    for _ in range(4):
        student = mommy.make('core.RelatedStudent', period=period)
        for source_assignment in source_assignments:
            self.__make_published_feedbackset_for_relatedstudent(
                relatedstudent=student, assignment=source_assignment,
                grading_points=50)
    requestuser = mommy.make(settings.AUTH_USER_MODEL)
    with self.assertNumQueries(9):
        self.mock_http302_postrequest(
            requestuser=requestuser,
            cradmin_role=current_assignment,
            sessionmock={
                'selected_assignment_ids': [
                    source_assignment.id for source_assignment in source_assignments],
                'points_threshold': 50,
                'from_select_assignment_view': ''
            },
            requestkwargs={'data': {'confirm': ''}})
| 59.353247
| 120
| 0.68148
| 4,257
| 45,702
| 6.971106
| 0.052384
| 0.088219
| 0.05213
| 0.062778
| 0.918419
| 0.903491
| 0.880442
| 0.861706
| 0.854933
| 0.840511
| 0
| 0.020163
| 0.237123
| 45,702
| 769
| 121
| 59.430429
| 0.831005
| 0.002254
| 0
| 0.751397
| 0
| 0
| 0.137709
| 0.057527
| 0
| 0
| 0
| 0
| 0.081006
| 1
| 0.048883
| false
| 0.00419
| 0.01676
| 0
| 0.071229
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df2cffd4087f8bfcc48e1706c138472c0274d32b
| 16,533
|
py
|
Python
|
tensorflow_quantum/python/differentiators/parameter_shift.py
|
we-taper/quantum
|
64b1efac36cd5b5026c8303bd107766a763987d8
|
[
"Apache-2.0"
] | 1
|
2021-02-05T20:21:23.000Z
|
2021-02-05T20:21:23.000Z
|
tensorflow_quantum/python/differentiators/parameter_shift.py
|
we-taper/quantum
|
64b1efac36cd5b5026c8303bd107766a763987d8
|
[
"Apache-2.0"
] | 1
|
2021-02-24T10:43:26.000Z
|
2021-02-24T10:43:26.000Z
|
tensorflow_quantum/python/differentiators/parameter_shift.py
|
isabella232/quantum-1
|
b95f08b7351b35ae353fd0789ae3a90034343b1a
|
[
"Apache-2.0"
] | 1
|
2021-11-02T18:52:06.000Z
|
2021-11-02T18:52:06.000Z
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute analytic gradients by using general parameter-shift rule. """
import tensorflow as tf
from tensorflow_quantum.python.differentiators import differentiator
from tensorflow_quantum.python.differentiators import parameter_shift_util
class ParameterShift(differentiator.Differentiator):
    """Calculate the general version of parameter-shift rule based gradients.

    This ParameterShift is the gradient estimator of the following paper:
    [arXiv:1905.13311](https://arxiv.org/abs/1905.13311), Gavin E. Crooks.

    This ParameterShift is used for any programs with parameterized gates.
    It internally decomposes any programs into array of gates with at most
    two distinct eigenvalues.

    >>> non_diff_op = tfq.get_expectation_op()
    >>> linear_differentiator = tfq.differentiators.ParameterShift()
    >>> # Get an expectation op, with this differentiator attached.
    >>> op = linear_differentiator.generate_differentiable_op(
    ...     analytic_op=non_diff_op
    ... )
    >>> qubit = cirq.GridQubit(0, 0)
    >>> circuit = tfq.convert_to_tensor([
    ...     cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha'))
    ... ])
    >>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]])
    >>> symbol_values_array = np.array([[0.123]], dtype=np.float32)
    >>> # Calculate tfq gradient.
    >>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
    >>> with tf.GradientTape() as g:
    ...     g.watch(symbol_values_tensor)
    ...     expectations = op(circuit, ['alpha'], symbol_values_tensor, psums)
    >>> # This value is now computed via the ParameterShift rule.
    >>> # https://arxiv.org/abs/1905.13311
    >>> grads = g.gradient(expectations, symbol_values_tensor)
    >>> grads
    tf.Tensor([[-1.1839752]], shape=(1, 1), dtype=float32)
    """

    @tf.function
    def get_gradient_circuits(self, programs, symbol_names, symbol_values):
        """See base class description."""
        raise NotImplementedError(
            "Gradient circuits are not currently available for "
            "ParameterShift.")

    def _compute_gradient(self, programs, symbol_names, symbol_values,
                          pauli_sums, grad, num_samples=None):
        """Shared parameter-shift gradient computation.

        Both `differentiate_analytic` and `differentiate_sampled` use exactly
        the same gradient pipeline; the only difference is whether a
        `num_samples` tensor is tiled and forwarded to `self.expectation_op`.
        This helper implements the pipeline once for both.

        Args:
            programs: `tf.Tensor` of strings with shape [batch_size].
            symbol_names: `tf.Tensor` of strings with shape [n_params].
            symbol_values: `tf.Tensor` of real numbers with shape
                [batch_size, n_params].
            pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops].
            grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops],
                the gradient backpropagated into the op being differentiated.
            num_samples: Optional `tf.Tensor` of positive integers. When None,
                the analytic expectation op signature is used; otherwise the
                sampled signature.

        Returns:
            `tf.Tensor` of shape [batch_size, n_symbols] with the backward
            gradient values for each program and symbol.
        """
        # These shapes get used a lot below.
        n_symbols = tf.gather(tf.shape(symbol_names), 0)
        n_programs = tf.gather(tf.shape(programs), 0)
        n_ops = tf.gather(tf.shape(pauli_sums), 1)
        # Assume cirq.decompose() generates gates with at most two distinct
        # eigenvalues, which results in two parameter shifts.
        n_shifts = 2

        # STEP 1: Generate required inputs for executor.
        # Deserialize programs and parse all parameterized gates.
        # new_programs has shape [n_symbols, n_param_gates, n_shifts,
        # n_programs] and contains the parameter-shifted programs.
        (new_programs, weights, shifts,
         n_param_gates) = parameter_shift_util.parse_programs(
             programs, symbol_names, symbol_values, n_symbols)

        # Reshape & transpose new_programs, weights and shifts to fit into
        # the input format of the tensorflow_quantum simulator.
        new_programs = tf.transpose(new_programs, [0, 2, 3, 1])
        weights = tf.transpose(weights, [0, 2, 3, 1])
        shifts = tf.transpose(shifts, [0, 2, 3, 1])

        # Flatten everything to fit into the expectation op correctly.
        total_programs = n_programs * n_shifts * n_param_gates * n_symbols
        flat_programs = tf.reshape(new_programs, [total_programs])
        flat_shifts = tf.reshape(shifts, [total_programs])

        # Tile up and then reshape so ops/values line up with programs.
        n_tile = n_shifts * n_param_gates * n_symbols
        flat_perturbations = tf.concat([
            tf.reshape(
                tf.tile(tf.expand_dims(symbol_values, 0),
                        tf.stack([n_tile, 1, 1])), [total_programs, n_symbols]),
            tf.expand_dims(flat_shifts, axis=1)
        ],
                                       axis=1)
        flat_ops = tf.reshape(
            tf.tile(tf.expand_dims(pauli_sums, 0), tf.stack([n_tile, 1, 1])),
            [total_programs, n_ops])

        # Append the impurity symbol (used by the shifted programs) to the
        # symbol names.
        new_symbol_names = tf.concat([
            symbol_names,
            tf.expand_dims(tf.constant(
                parameter_shift_util._PARAMETER_IMPURITY_NAME),
                           axis=0)
        ],
                                     axis=0)

        # STEP 2: calculate the required expectation values.
        if num_samples is None:
            expectations = self.expectation_op(flat_programs, new_symbol_names,
                                               flat_perturbations, flat_ops)
        else:
            flat_num_samples = tf.reshape(
                tf.tile(tf.expand_dims(num_samples, 0),
                        tf.stack([n_tile, 1, 1])), [total_programs, n_ops])
            expectations = self.expectation_op(flat_programs, new_symbol_names,
                                               flat_perturbations, flat_ops,
                                               flat_num_samples)

        # STEP 3: generate gradients according to the results.
        # The rows are grouped according to which parameter was perturbed,
        # so reshape to reflect that.
        grouped_expectations = tf.reshape(
            expectations,
            [n_symbols, n_shifts * n_programs * n_param_gates, -1])

        # Compute the partial of the circuit output with respect to each
        # perturbed parameter.
        def rearrange_expectations(grouped):

            def split_vertically(i):
                return tf.slice(grouped, [i * n_programs, 0],
                                [n_programs, n_ops])

            return tf.map_fn(split_vertically,
                             tf.range(n_param_gates * n_shifts),
                             dtype=tf.float32)

        # Reshape so that expectations calculated on different programs are
        # separated by a dimension.
        rearranged_expectations = tf.map_fn(rearrange_expectations,
                                            grouped_expectations)

        # Weighted sum over the shifted evaluations gives the partials.
        partials = tf.einsum(
            'spco,spc->sco', rearranged_expectations,
            tf.cast(
                tf.reshape(weights,
                           [n_symbols, n_param_gates * n_shifts, n_programs]),
                rearranged_expectations.dtype))

        # Apply the chain rule with the upstream gradient.
        return tf.einsum('sco,co -> cs', partials, grad)

    @tf.function
    def differentiate_analytic(self, programs, symbol_names, symbol_values,
                               pauli_sums, forward_pass_vals, grad):
        """Calculate the gradient.

        The gradient calculations follows the following steps:

        1. Compute the decomposition of the incoming circuits so that we have
            their generator information (done using cirq in a tf.py_function)
        2. Use formula (31) from paper inside of TensorFlow to calculate
            gradients from all the decomposed circuits.
        3. Sum up terms and reshape for the total gradient that is compatible
            with TensorFlow.

        **CAUTION**
        Analytic gradient measurements based on this ParameterShift generally
        run at least K(=2) times SLOWER than the original circuit.
        On top of it, since all parameters of gates are shifted individually,
        the time complexity is linear in the number of parameterized gates L.
        So, you will see O(KL) slower time & space complexity than the original
        forward pass measurements.

        Args:
            programs: `tf.Tensor` of strings with shape [batch_size] containing
                the string representations of the circuits to be executed.
            symbol_names: `tf.Tensor` of strings with shape [n_params], which
                is used to specify the order in which the values in
                `symbol_values` should be placed inside of the circuits in
                `programs`.
            symbol_values: `tf.Tensor` of real numbers with shape
                [batch_size, n_params] specifying parameter values to resolve
                into the circuits specified by programs, following the ordering
                dictated by `symbol_names`.
            pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
                containing the string representation of the operators that will
                be used on all of the circuits in the expectation calculations.
            forward_pass_vals: `tf.Tensor` of real numbers with shape
                [batch_size, n_ops] containing the output of the forward pass
                through the op you are differentiating.
            grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]
                representing the gradient backpropagated to the output of the
                op you are differentiating through.

        Returns:
            Backward gradient values for each program & each pauli sum. It has
            the shape of [batch_size, n_symbols].
        """
        # forward_pass_vals is unused: the parameter-shift rule only needs
        # the shifted-program expectations computed in the helper.
        return self._compute_gradient(programs, symbol_names, symbol_values,
                                      pauli_sums, grad)

    @tf.function
    def differentiate_sampled(self, programs, symbol_names, symbol_values,
                              pauli_sums, num_samples, forward_pass_vals, grad):
        """Calculate the gradient.

        The gradient calculations follows the following steps:

        1. Compute the decomposition of the incoming circuits so that we have
            their generator information (done using cirq in a tf.py_function)
        2. Use formula (31) from paper inside of TensorFlow to calculate
            gradients from all the decomposed circuits.
        3. Sum up terms and reshape for the total gradient that is compatible
            with TensorFlow.

        **CAUTION**
        Analytic gradient measurements based on this ParameterShift generally
        run at least K(=2) times SLOWER than the original circuit.
        On top of it, since all parameters of gates are shifted individually,
        the time complexity is linear in the number of parameterized gates L.
        So, you will see O(KL) slower time & space complexity than the original
        forward pass measurements.

        Args:
            programs: `tf.Tensor` of strings with shape [batch_size] containing
                the string representations of the circuits to be executed.
            symbol_names: `tf.Tensor` of strings with shape [n_params], which
                is used to specify the order in which the values in
                `symbol_values` should be placed inside of the circuits in
                `programs`.
            symbol_values: `tf.Tensor` of real numbers with shape
                [batch_size, n_params] specifying parameter values to resolve
                into the circuits specified by programs, following the ordering
                dictated by `symbol_names`.
            pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
                containing the string representation of the operators that will
                be used on all of the circuits in the expectation calculations.
            num_samples: `tf.Tensor` of positiver integers indicating the number
                of samples used per term to calculate the expectation value
                in the forward pass.
            forward_pass_vals: `tf.Tensor` of real numbers with shape
                [batch_size, n_ops] containing the output of the forward pass
                through the op you are differentiating.
            grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]
                representing the gradient backpropagated to the output of the
                op you are differentiating through.

        Returns:
            Backward gradient values for each program & each pauli sum. It has
            the shape of [batch_size, n_symbols].
        """
        # Same pipeline as the analytic case, but num_samples is tiled and
        # forwarded to the sampled expectation op inside the helper.
        return self._compute_gradient(programs, symbol_names, symbol_values,
                                      pauli_sums, grad,
                                      num_samples=num_samples)
| 47.372493
| 80
| 0.628501
| 2,031
| 16,533
| 4.965534
| 0.165928
| 0.009916
| 0.017452
| 0.014279
| 0.815369
| 0.811006
| 0.793059
| 0.786713
| 0.777987
| 0.777987
| 0
| 0.011719
| 0.298071
| 16,533
| 348
| 81
| 47.508621
| 0.857303
| 0.54358
| 0
| 0.861789
| 0
| 0
| 0.017141
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056911
| false
| 0.01626
| 0.02439
| 0.01626
| 0.138211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df3369e49701726fc7595e826283cd0f494a82d8
| 123
|
py
|
Python
|
examples/genus-2.py
|
imallett/ImplicitGrapher
|
8a474dc3a5625392ea7da82ec0f493bdce5a357a
|
[
"MIT"
] | 4
|
2017-06-23T16:25:01.000Z
|
2020-07-21T02:23:36.000Z
|
examples/genus-2.py
|
imallett/ImplicitGrapher
|
8a474dc3a5625392ea7da82ec0f493bdce5a357a
|
[
"MIT"
] | null | null | null |
examples/genus-2.py
|
imallett/ImplicitGrapher
|
8a474dc3a5625392ea7da82ec0f493bdce5a357a
|
[
"MIT"
] | null | null | null |
def f(pos):
    """Implicit function whose zero level set is a genus-2 surface.

    `pos` is an (x, y, z) triple; returns a float that is zero exactly on
    the surface.
    """
    x, y, z = pos
    # Precompute the squares used in several terms.
    x2 = x * x
    y2 = y * y
    z2 = z * z
    term_a = 2.0 * z * (z2 - 3.0 * x2) * (1.0 - y2)
    term_b = (x2 + z2) ** 2
    term_c = (2.0 * y2 - 1.0) * (1.0 - y2)
    return term_a + term_b - term_c
| 30.75
| 92
| 0.382114
| 36
| 123
| 1.305556
| 0.305556
| 0.12766
| 0.191489
| 0.170213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.260163
| 123
| 3
| 93
| 41
| 0.373626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
df416abdfe8fc6934f0b0051d004ef5cfc1e82fb
| 177
|
py
|
Python
|
deps/riak_pb/riak_pb/__init__.py
|
pexip/os-riak
|
9e64fb0412121776c971c8f04e8c96df9f2a31de
|
[
"Apache-2.0"
] | null | null | null |
deps/riak_pb/riak_pb/__init__.py
|
pexip/os-riak
|
9e64fb0412121776c971c8f04e8c96df9f2a31de
|
[
"Apache-2.0"
] | null | null | null |
deps/riak_pb/riak_pb/__init__.py
|
pexip/os-riak
|
9e64fb0412121776c971c8f04e8c96df9f2a31de
|
[
"Apache-2.0"
] | 11
|
2015-02-11T21:57:01.000Z
|
2018-07-25T21:30:12.000Z
|
from riak_pb.riak_pb2 import *
from riak_pb.riak_kv_pb2 import *
from riak_pb.riak_search_pb2 import *
from riak_pb.riak_dt_pb2 import *
from riak_pb.riak_yokozuna_pb2 import *
| 29.5
| 39
| 0.830508
| 34
| 177
| 3.911765
| 0.264706
| 0.300752
| 0.37594
| 0.526316
| 0.691729
| 0.691729
| 0
| 0
| 0
| 0
| 0
| 0.031847
| 0.112994
| 177
| 5
| 40
| 35.4
| 0.815287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
df4acf59c7466f9f51992179a04282eaf6649c10
| 48
|
py
|
Python
|
testcookies.py
|
IvanZyf666/cgi-lab
|
380f4285ea963ac2261e3ea57e7539509ac471a1
|
[
"Apache-2.0"
] | null | null | null |
testcookies.py
|
IvanZyf666/cgi-lab
|
380f4285ea963ac2261e3ea57e7539509ac471a1
|
[
"Apache-2.0"
] | null | null | null |
testcookies.py
|
IvanZyf666/cgi-lab
|
380f4285ea963ac2261e3ea57e7539509ac471a1
|
[
"Apache-2.0"
] | null | null | null |
import os
def test_cookies():
    """Print the marker string "Set" to stdout; returns None."""
    marker = "Set"
    print(marker)
| 16
| 20
| 0.645833
| 7
| 48
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 48
| 3
| 21
| 16
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
df6f78dabb66286ba46927b9461738dc28e0711a
| 1,480
|
py
|
Python
|
FATS/import_lc_cluster.py
|
serdarozsoy/FATS
|
e2a1bf4f142c20eada5d0d63435599e9139d4a9d
|
[
"MIT"
] | null | null | null |
FATS/import_lc_cluster.py
|
serdarozsoy/FATS
|
e2a1bf4f142c20eada5d0d63435599e9139d4a9d
|
[
"MIT"
] | null | null | null |
FATS/import_lc_cluster.py
|
serdarozsoy/FATS
|
e2a1bf4f142c20eada5d0d63435599e9139d4a9d
|
[
"MIT"
] | null | null | null |
# NOTE(review): resolved a leftover git merge conflict -- both sides of the
# conflict were byte-identical, so a single copy of the module is kept.
#from Feature import FeatureSpace
import numpy as np


class ReadLC_MACHO:
    """Reader for MACHO light-curve content.

    `lc` is a sequence of lines; the first three are treated as header lines
    and skipped, and each remaining line is expected to hold
    'mjd data error' values separated by single spaces.
    """

    def __init__(self, lc):
        # Raw light-curve lines, header included.
        self.content1 = lc

    def ReadLC(self):
        """Parse the light curve and return ``[data, mjd, error]`` lists.

        Parsing stops at the first empty line. Note: mutates ``self.content1``
        by dropping the three header lines (preserved from the original
        implementation).
        """
        data = []
        mjd = []
        error = []
        # Skip the three header lines.
        self.content1 = self.content1[3:]
        # Direct iteration replaces the Python-2-only ``xrange`` index loop;
        # behavior is identical.
        for line in self.content1:
            if not line:
                break
            fields = line.split(' ')
            mjd.append(float(fields[0]))
            data.append(float(fields[1]))
            error.append(float(fields[2]))
        return [data, mjd, error]
| 20
| 53
| 0.512162
| 162
| 1,480
| 4.617284
| 0.290123
| 0.192513
| 0.144385
| 0.07754
| 0.941176
| 0.941176
| 0.941176
| 0.941176
| 0.941176
| 0.941176
| 0
| 0.042508
| 0.364189
| 1,480
| 74
| 54
| 20
| 0.752391
| 0.132432
| 0
| 0.923077
| 0
| 0
| 0.001566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.051282
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
10d1a9bf87c2cec6f6accc8731757f75d95e38ab
| 48
|
py
|
Python
|
analysis/technical_analysis_cuda/tacuda/__init__.py
|
AleksandrIvanov89/trading
|
36672cb47dee2c4a284d611b2a2bd0741f5c1c3f
|
[
"MIT"
] | null | null | null |
analysis/technical_analysis_cuda/tacuda/__init__.py
|
AleksandrIvanov89/trading
|
36672cb47dee2c4a284d611b2a2bd0741f5c1c3f
|
[
"MIT"
] | null | null | null |
analysis/technical_analysis_cuda/tacuda/__init__.py
|
AleksandrIvanov89/trading
|
36672cb47dee2c4a284d611b2a2bd0741f5c1c3f
|
[
"MIT"
] | null | null | null |
from .ta_cuda import *
from .ta_kernels import *
| 24
| 25
| 0.770833
| 8
| 48
| 4.375
| 0.625
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 25
| 24
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8006439c17d4cf5e4623287a00f579be9808b20f
| 18,115
|
py
|
Python
|
sdk/python/pulumi_azure/backup/_inputs.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/backup/_inputs.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/backup/_inputs.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'PolicyFileShareBackupArgs',
'PolicyFileShareRetentionDailyArgs',
'PolicyFileShareRetentionMonthlyArgs',
'PolicyFileShareRetentionWeeklyArgs',
'PolicyFileShareRetentionYearlyArgs',
'PolicyVMBackupArgs',
'PolicyVMRetentionDailyArgs',
'PolicyVMRetentionMonthlyArgs',
'PolicyVMRetentionWeeklyArgs',
'PolicyVMRetentionYearlyArgs',
]
@pulumi.input_type
class PolicyFileShareBackupArgs:
    """Input args for the `backup` block of an Azure file-share backup policy."""

    def __init__(__self__, *,
                 frequency: pulumi.Input[str],
                 time: pulumi.Input[str]):
        """
        :param pulumi.Input[str] frequency: Sets the backup frequency. Currently, only `Daily` is supported
        :param pulumi.Input[str] time: The time of day to perform the backup in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.)
        """
        pulumi.set(__self__, "frequency", frequency)
        pulumi.set(__self__, "time", time)

    @property
    @pulumi.getter
    def frequency(self) -> pulumi.Input[str]:
        """
        Sets the backup frequency. Currently, only `Daily` is supported
        """
        return pulumi.get(self, "frequency")

    @frequency.setter
    def frequency(self, value: pulumi.Input[str]):
        pulumi.set(self, "frequency", value)

    @property
    @pulumi.getter
    def time(self) -> pulumi.Input[str]:
        """
        The time of day to perform the backup in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.)
        """
        return pulumi.get(self, "time")

    @time.setter
    def time(self, value: pulumi.Input[str]):
        pulumi.set(self, "time", value)
@pulumi.input_type
class PolicyFileShareRetentionDailyArgs:
    """Input args for the daily retention block of a file-share backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int]):
        """
        :param pulumi.Input[int] count: The number of daily backups to keep. Must be between `1` and `10`
        """
        pulumi.set(__self__, "count", count)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of daily backups to keep. Must be between `1` and `10`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)
@pulumi.input_type
class PolicyFileShareRetentionMonthlyArgs:
    """Input args for the monthly retention block of a file-share backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 weekdays: pulumi.Input[Sequence[pulumi.Input[str]]],
                 weeks: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[int] count: The number of monthly backups to keep. Must be between `1` and `10`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weeks: The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "weekdays", weekdays)
        pulumi.set(__self__, "weeks", weeks)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of monthly backups to keep. Must be between `1` and `10`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)

    @property
    @pulumi.getter
    def weekdays(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weekdays", value)

    @property
    @pulumi.getter
    def weeks(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        return pulumi.get(self, "weeks")

    @weeks.setter
    def weeks(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weeks", value)
@pulumi.input_type
class PolicyFileShareRetentionWeeklyArgs:
    """Input args for the weekly retention block of a file-share backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 weekdays: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[int] count: The number of weekly backups to keep. Must be between `1` and `10`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "weekdays", weekdays)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of weekly backups to keep. Must be between `1` and `10`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)

    @property
    @pulumi.getter
    def weekdays(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weekdays", value)
@pulumi.input_type
class PolicyFileShareRetentionYearlyArgs:
    """Input args for the yearly retention block of a file-share backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 months: pulumi.Input[Sequence[pulumi.Input[str]]],
                 weekdays: pulumi.Input[Sequence[pulumi.Input[str]]],
                 weeks: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[int] count: The number of yearly backups to keep. Must be between `1` and `10`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] months: The months of the year to retain backups of. Must be one of `January`, `February`, `March`, `April`, `May`, `June`, `July`, `August`, `September`, `October`, `November` and `December`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weeks: The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "months", months)
        pulumi.set(__self__, "weekdays", weekdays)
        pulumi.set(__self__, "weeks", weeks)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of yearly backups to keep. Must be between `1` and `10`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)

    @property
    @pulumi.getter
    def months(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The months of the year to retain backups of. Must be one of `January`, `February`, `March`, `April`, `May`, `June`, `July`, `August`, `September`, `October`, `November` and `December`.
        """
        return pulumi.get(self, "months")

    @months.setter
    def months(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "months", value)

    @property
    @pulumi.getter
    def weekdays(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weekdays", value)

    @property
    @pulumi.getter
    def weeks(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        return pulumi.get(self, "weeks")

    @weeks.setter
    def weeks(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weeks", value)
@pulumi.input_type
class PolicyVMBackupArgs:
    """Input args for the `backup` block of an Azure VM backup policy."""

    def __init__(__self__, *,
                 frequency: pulumi.Input[str],
                 time: pulumi.Input[str],
                 weekdays: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] frequency: Sets the backup frequency. Must be either `Daily` or `Weekly`.
        :param pulumi.Input[str] time: The time of day to perform the backup in 24-hour format.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        pulumi.set(__self__, "frequency", frequency)
        pulumi.set(__self__, "time", time)
        # weekdays is optional; only set when provided so the key is absent otherwise.
        if weekdays is not None:
            pulumi.set(__self__, "weekdays", weekdays)

    @property
    @pulumi.getter
    def frequency(self) -> pulumi.Input[str]:
        """
        Sets the backup frequency. Must be either `Daily` or `Weekly`.
        """
        return pulumi.get(self, "frequency")

    @frequency.setter
    def frequency(self, value: pulumi.Input[str]):
        pulumi.set(self, "frequency", value)

    @property
    @pulumi.getter
    def time(self) -> pulumi.Input[str]:
        """
        The time of day to perform the backup in 24-hour format.
        """
        return pulumi.get(self, "time")

    @time.setter
    def time(self, value: pulumi.Input[str]):
        pulumi.set(self, "time", value)

    @property
    @pulumi.getter
    def weekdays(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "weekdays", value)
@pulumi.input_type
class PolicyVMRetentionDailyArgs:
    """Input args for the daily retention block of a VM backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int]):
        """
        :param pulumi.Input[int] count: The number of daily backups to keep. Must be between `1` and `9999`
        """
        pulumi.set(__self__, "count", count)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of daily backups to keep. Must be between `1` and `9999`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)
@pulumi.input_type
class PolicyVMRetentionMonthlyArgs:
    """Input args for the monthly retention block of a VM backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 weekdays: pulumi.Input[Sequence[pulumi.Input[str]]],
                 weeks: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[int] count: The number of monthly backups to keep. Must be between `1` and `9999`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weeks: The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "weekdays", weekdays)
        pulumi.set(__self__, "weeks", weeks)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of monthly backups to keep. Must be between `1` and `9999`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)

    @property
    @pulumi.getter
    def weekdays(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weekdays", value)

    @property
    @pulumi.getter
    def weeks(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        return pulumi.get(self, "weeks")

    @weeks.setter
    def weeks(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weeks", value)
@pulumi.input_type
class PolicyVMRetentionWeeklyArgs:
    """Input args for the weekly retention block of a VM backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 weekdays: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[int] count: The number of weekly backups to keep. Must be between `1` and `9999`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "weekdays", weekdays)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of weekly backups to keep. Must be between `1` and `9999`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)

    @property
    @pulumi.getter
    def weekdays(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weekdays", value)
@pulumi.input_type
class PolicyVMRetentionYearlyArgs:
    """Input args for the yearly retention block of a VM backup policy."""

    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 months: pulumi.Input[Sequence[pulumi.Input[str]]],
                 weekdays: pulumi.Input[Sequence[pulumi.Input[str]]],
                 weeks: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        :param pulumi.Input[int] count: The number of yearly backups to keep. Must be between `1` and `9999`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] months: The months of the year to retain backups of. Must be one of `January`, `February`, `March`, `April`, `May`, `June`, `July`, `August`, `September`, `October`, `November` and `December`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weekdays: The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] weeks: The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "months", months)
        pulumi.set(__self__, "weekdays", weekdays)
        pulumi.set(__self__, "weeks", weeks)

    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        The number of yearly backups to keep. Must be between `1` and `9999`
        """
        return pulumi.get(self, "count")

    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)

    @property
    @pulumi.getter
    def months(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The months of the year to retain backups of. Must be one of `January`, `February`, `March`, `April`, `May`, `June`, `July`, `August`, `September`, `October`, `November` and `December`.
        """
        return pulumi.get(self, "months")

    @months.setter
    def months(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "months", value)

    @property
    @pulumi.getter
    def weekdays(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weekday backups to retain. Must be one of `Sunday`, `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday` or `Saturday`.
        """
        return pulumi.get(self, "weekdays")

    @weekdays.setter
    def weekdays(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weekdays", value)

    @property
    @pulumi.getter
    def weeks(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        The weeks of the month to retain backups of. Must be one of `First`, `Second`, `Third`, `Fourth`, `Last`.
        """
        return pulumi.get(self, "weeks")

    @weeks.setter
    def weeks(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "weeks", value)
| 38.542553
| 249
| 0.619763
| 2,171
| 18,115
| 5.081529
| 0.066329
| 0.16153
| 0.086294
| 0.117839
| 0.920776
| 0.918963
| 0.918963
| 0.910624
| 0.910624
| 0.895667
| 0
| 0.00702
| 0.237207
| 18,115
| 469
| 250
| 38.624733
| 0.791359
| 0.353795
| 0
| 0.879121
| 1
| 0
| 0.069506
| 0.024963
| 0
| 0
| 0
| 0
| 0
| 1
| 0.21978
| false
| 0
| 0.018315
| 0
| 0.3663
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
803eb06ef8cf0f51ead350bb003a86d24b2f6811
| 102
|
py
|
Python
|
week1_00_packages/project/project_file_8.py
|
SaidMuratbekov/msai-python
|
fc694d9d6571af8dbdf162a35f98b6ffdd079396
|
[
"MIT"
] | null | null | null |
week1_00_packages/project/project_file_8.py
|
SaidMuratbekov/msai-python
|
fc694d9d6571af8dbdf162a35f98b6ffdd079396
|
[
"MIT"
] | null | null | null |
week1_00_packages/project/project_file_8.py
|
SaidMuratbekov/msai-python
|
fc694d9d6571af8dbdf162a35f98b6ffdd079396
|
[
"MIT"
] | null | null | null |
# project_file_8.py
from project_file_7 import *
from project_file_6 import *
print(PROJECT_VAR)
# 1
| 14.571429
| 28
| 0.794118
| 18
| 102
| 4.111111
| 0.611111
| 0.445946
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.137255
| 102
| 6
| 29
| 17
| 0.795455
| 0.186275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3390fb7c3beac231b47be3227fd06832d47d448f
| 20,124
|
py
|
Python
|
src/tests/unit/autoks/test_model_selection.py
|
lschlessinger1/MS-project
|
e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3
|
[
"MIT"
] | 2
|
2019-04-29T15:18:11.000Z
|
2019-12-13T18:58:40.000Z
|
src/tests/unit/autoks/test_model_selection.py
|
lschlessinger1/MS-project
|
e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3
|
[
"MIT"
] | 275
|
2019-02-19T22:59:39.000Z
|
2020-10-03T08:56:08.000Z
|
src/tests/unit/autoks/test_model_selection.py
|
lschlessinger1/MS-project
|
e1c02d1d1a7a2480ff6f14f30625dc42ee3417e3
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import MagicMock, patch
import numpy as np
from GPy.kern import RationalQuadratic, RBF, LinScaleShift
from src.autoks.core.covariance import Covariance
from src.autoks.core.gp_model import GPModel
from src.autoks.core.gp_model_population import ActiveModelPopulation
from src.autoks.core.grammar import CKSGrammar, GeometricRandomGrammar
from src.autoks.core.model_selection import EvolutionaryModelSelector
from src.autoks.core.model_selection.base import ModelSelector
from src.autoks.core.model_selection.boms_model_selector import BomsModelSelector
from src.autoks.core.model_selection.cks_model_selector import CKSModelSelector
from src.evalg.serialization import Serializable
class TestModelSelector(TestCase):
    """Unit tests for the base ``ModelSelector``: candidate proposal and
    dict (de)serialization round-trips, both before and after the
    selector has been built with training data."""

    def setUp(self):
        """Create a selector over a built 1-D geometric random grammar
        plus small fixed train/test arrays shared by the tests."""
        self.gp_models = [GPModel(Covariance(RationalQuadratic(1))), GPModel(Covariance(RBF(1) + RBF(1))),
                          GPModel(Covariance(RBF(1)))]
        grammar = GeometricRandomGrammar()
        grammar.build(n_dims=1)
        fitness_fn = 'nbic'  # fitness function selected by name
        self.x_train = np.array([[1, 2, 3], [4, 5, 6]])
        self.y_train = np.array([[5], [10]])
        self.x_test = np.array([[10, 20, 30], [40, 50, 60]])
        self.y_test = np.array([[2], [1]])
        self.model_selector = ModelSelector(grammar, fitness_fn)

    @patch('src.autoks.core.grammar.BaseGrammar.get_candidates')
    def test_propose_new_models(self, mock_get_candidates):
        """_propose_new_models wraps every grammar candidate covariance in
        a model, preserving order (compared via infix strings)."""
        expected = [Covariance(RBF(1)), Covariance(RationalQuadratic(1)), Covariance(RBF(1) + RationalQuadratic(1)),
                    Covariance(RBF(1) * RationalQuadratic(1))]
        mock_get_candidates.return_value = expected
        pop = ActiveModelPopulation()
        pop.update(self.gp_models)
        actual = self.model_selector._propose_new_models(pop, callbacks=MagicMock())
        self.assertIsInstance(actual, list)
        self.assertEqual(len(expected), len(actual))
        for expected_cov, actual_cov in zip(expected, actual):
            self.assertEqual(expected_cov.infix, actual_cov.infix)

    def test_to_dict(self):
        """to_dict exposes every persisted field; the train-stat fields
        switch from raw values to ``.tolist()`` output once built."""
        test_cases = (
            False, True
        )
        for built in test_cases:
            with self.subTest(name=built):
                if built:
                    # Building populates the _x/_y train mean/std arrays.
                    self.model_selector._prepare_data(self.x_train, self.y_train)
                actual = self.model_selector.to_dict()
                self.assertIsInstance(actual, dict)
                self.assertIn('grammar', actual)
                self.assertIn('fitness_fn', actual)
                self.assertIn('n_parents', actual)
                self.assertIn('n_evals', actual)
                self.assertIn('additive_form', actual)
                self.assertIn('optimizer', actual)
                self.assertIn('n_restarts_optimizer', actual)
                self.assertIn('standardize_x', actual)
                self.assertIn('standardize_y', actual)
                self.assertIn('total_eval_time', actual)
                self.assertIn('total_expansion_time', actual)
                self.assertIn('total_model_search_time', actual)
                self.assertIn('gp_fn_name', actual)
                self.assertIn('gp_args', actual)
                self.assertIn('name', actual)
                self.assertIn('built', actual)
                self.assertIn('selected_models', actual)
                self.assertIn('_x_train_mean', actual)
                self.assertIn('_x_train_std', actual)
                self.assertIn('_y_train_mean', actual)
                self.assertIn('_y_train_std', actual)
                self.assertEqual(self.model_selector.grammar.to_dict(), actual['grammar'])
                self.assertEqual(self.model_selector.fitness_fn_name, actual['fitness_fn'])
                self.assertEqual(self.model_selector.n_parents, actual['n_parents'])
                self.assertEqual(self.model_selector.n_evals, actual['n_evals'])
                self.assertEqual(self.model_selector.additive_form, actual['additive_form'])
                self.assertEqual(self.model_selector.optimizer, actual['optimizer'])
                self.assertEqual(self.model_selector.n_restarts_optimizer, actual['n_restarts_optimizer'])
                self.assertEqual(self.model_selector.standardize_x, actual['standardize_x'])
                self.assertEqual(self.model_selector.standardize_y, actual['standardize_y'])
                self.assertEqual(self.model_selector.total_eval_time, actual['total_eval_time'])
                self.assertEqual(self.model_selector.total_expansion_time, actual['total_expansion_time'])
                self.assertEqual(self.model_selector.total_model_search_time, actual['total_model_search_time'])
                self.assertEqual(self.model_selector._gp_fn_name, actual['gp_fn_name'])
                self.assertEqual(self.model_selector.name, actual['name'])
                self.assertEqual(self.model_selector.built, actual['built'])
                expected_selected_models = [m.to_dict() for m in self.model_selector.selected_models]
                actual_selected_models = [m.to_dict() for m in actual['selected_models']]
                self.assertEqual(expected_selected_models, actual_selected_models)
                if not built:
                    # Unbuilt: stats are stored as-is (presumably None).
                    self.assertEqual(self.model_selector._x_train_mean, actual['_x_train_mean'])
                    self.assertEqual(self.model_selector._x_train_std, actual['_x_train_std'])
                    self.assertEqual(self.model_selector._y_train_mean, actual['_y_train_mean'])
                    self.assertEqual(self.model_selector._y_train_std, actual['_y_train_std'])
                else:
                    # Built: arrays are serialized via .tolist().
                    self.assertEqual(self.model_selector._x_train_mean.tolist(), actual['_x_train_mean'])
                    self.assertEqual(self.model_selector._x_train_std.tolist(), actual['_x_train_std'])
                    self.assertEqual(self.model_selector._y_train_mean.tolist(), actual['_y_train_mean'])
                    self.assertEqual(self.model_selector._y_train_std.tolist(), actual['_y_train_std'])

    def test_from_dict(self):
        """from_dict restores an equivalent selector whether called on
        ModelSelector directly or via the Serializable base class."""
        test_cases_built = (False, True)
        for built in test_cases_built:
            with self.subTest(built=built):
                test_cases_cls = (ModelSelector, Serializable)
                for cls in test_cases_cls:
                    with self.subTest(cls=cls):
                        if built:
                            self.model_selector._prepare_data(self.x_train, self.y_train)
                        actual = cls.from_dict(self.model_selector.to_dict())
                        self.assertIsInstance(actual, ModelSelector)
                        self.assertEqual(self.model_selector.grammar.__class__.__name__,
                                         actual.grammar.__class__.__name__)
                        self.assertEqual(self.model_selector.fitness_fn_name, actual.fitness_fn_name)
                        self.assertEqual(self.model_selector.fitness_fn, actual.fitness_fn)
                        self.assertEqual(self.model_selector.n_parents, actual.n_parents)
                        self.assertEqual(self.model_selector.n_evals, actual.n_evals)
                        self.assertEqual(self.model_selector.additive_form, actual.additive_form)
                        self.assertEqual(self.model_selector.optimizer, actual.optimizer)
                        self.assertEqual(self.model_selector.n_restarts_optimizer, actual.n_restarts_optimizer)
                        self.assertEqual(self.model_selector.standardize_x, actual.standardize_x)
                        self.assertEqual(self.model_selector.standardize_y, actual.standardize_y)
                        self.assertEqual(self.model_selector.total_eval_time, actual.total_eval_time)
                        self.assertEqual(self.model_selector.total_expansion_time, actual.total_expansion_time)
                        self.assertEqual(self.model_selector.total_model_search_time, actual.total_model_search_time)
                        self.assertEqual(self.model_selector._gp_fn_name, actual._gp_fn_name)
                        self.assertEqual(self.model_selector._gp_args, actual._gp_args)
                        self.assertEqual(self.model_selector.name, actual.name)
                        self.assertEqual(self.model_selector.built, actual.built)
                        self.assertEqual(self.model_selector.selected_models, actual.selected_models)
                        if not built:
                            # Unbuilt selectors round-trip with empty train stats.
                            self.assertIsNone(actual._x_train_mean)
                            self.assertIsNone(actual._x_train_std)
                            self.assertIsNone(actual._y_train_mean)
                            self.assertIsNone(actual._y_train_std)
                        else:
                            self.assertEqual(self.model_selector._x_train_mean.tolist(), actual._x_train_mean.tolist())
                            self.assertEqual(self.model_selector._x_train_std.tolist(), actual._x_train_std.tolist())
                            self.assertEqual(self.model_selector._y_train_mean.tolist(), actual._y_train_mean.tolist())
                            self.assertEqual(self.model_selector._y_train_std.tolist(), actual._y_train_std.tolist())
class TestCKSModelSelector(TestCase):
    """Tests for CKSModelSelector's initial candidate covariances."""

    def setUp(self):
        """Prepare per-dimension SE/RQ covariances (dims 0..2) and a
        linear kernel on dimension 0."""
        self.se0 = Covariance(RBF(1, active_dims=[0]))
        self.se1 = Covariance(RBF(1, active_dims=[1]))
        self.se2 = Covariance(RBF(1, active_dims=[2]))
        self.rq0 = Covariance(RationalQuadratic(1, active_dims=[0]))
        self.rq1 = Covariance(RationalQuadratic(1, active_dims=[1]))
        self.rq2 = Covariance(RationalQuadratic(1, active_dims=[2]))
        self.lin0 = Covariance(LinScaleShift(1, active_dims=[0]))

    def _check_initial_candidates(self, n_dims, expected):
        # Build a fresh SE/RQ grammar for n_dims dimensions and compare
        # the selector's initial candidates to `expected` by infix form.
        grammar = CKSGrammar(base_kernel_names=['SE', 'RQ'])
        grammar.build(n_dims=n_dims)
        selector = CKSModelSelector(grammar)
        candidates = selector._get_initial_candidate_covariances()
        self.assertIsInstance(candidates, list)
        self.assertEqual(len(expected), len(candidates))
        for exp_cov, got_cov in zip(expected, candidates):
            self.assertEqual(exp_cov.infix, got_cov.infix)

    def test_get_initial_candidate_covariances(self):
        """Initial candidates are one base kernel per (type, dimension)."""
        self._check_initial_candidates(2, [self.se0, self.se1, self.rq0, self.rq1])
        self._check_initial_candidates(1, [self.se0, self.rq0])
class TestBomsModelSelector(TestCase):
    """Fixture construction for BomsModelSelector tests."""

    def setUp(self):
        """Build a BOMS selector over mocked collaborators with small
        fixed train/test arrays."""
        # Toy data shared by the tests (independent statements reordered).
        self.x_train = np.array([[1, 2, 3], [4, 5, 6]])
        self.y_train = np.array([[5], [10]])
        self.x_test = np.array([[10, 20, 30], [40, 50, 60]])
        self.y_test = np.array([[2], [1]])
        self.gp_models = [GPModel(Covariance(RationalQuadratic(1))), GPModel(Covariance(RBF(1) + RBF(1))),
                          GPModel(Covariance(RBF(1)))]
        # Collaborators are mocked; only selector construction is exercised.
        grammar = MagicMock()
        kernel_selector = MagicMock()
        objective = MagicMock()
        self.model_selector = BomsModelSelector(grammar, kernel_selector, objective)
class TestEvolutionaryModelSelector(TestCase):
def setUp(self) -> None:
self.x_train = np.array([[1, 2, 3], [4, 5, 6]])
self.y_train = np.array([[5], [10]])
self.x_test = np.array([[10, 20, 30], [40, 50, 60]])
self.y_test = np.array([[2], [1]])
self.model_selector = EvolutionaryModelSelector()
def test_to_dict(self):
test_cases = (
False, True
)
for built in test_cases:
with self.subTest(name=built):
if built:
self.model_selector._prepare_data(self.x_train, self.y_train)
actual = self.model_selector.to_dict()
self.assertIsInstance(actual, dict)
self.assertIn('grammar', actual)
self.assertIn('fitness_fn', actual)
self.assertIn('n_parents', actual)
self.assertIn('n_evals', actual)
self.assertIn('additive_form', actual)
self.assertIn('optimizer', actual)
self.assertIn('n_restarts_optimizer', actual)
self.assertIn('standardize_x', actual)
self.assertIn('standardize_y', actual)
self.assertIn('total_eval_time', actual)
self.assertIn('total_expansion_time', actual)
self.assertIn('total_model_search_time', actual)
self.assertIn('gp_fn_name', actual)
self.assertIn('gp_args', actual)
self.assertIn('name', actual)
self.assertIn('built', actual)
self.assertIn('selected_models', actual)
self.assertIn('_x_train_mean', actual)
self.assertIn('_x_train_std', actual)
self.assertIn('_y_train_mean', actual)
self.assertIn('_y_train_std', actual)
self.assertIn('initializer', actual)
self.assertIn('n_init_trees', actual)
self.assertIn('max_offspring', actual)
self.assertIn('fitness_sharing', actual)
self.assertEqual(self.model_selector.grammar.to_dict(), actual['grammar'])
self.assertEqual(self.model_selector.fitness_fn_name, actual['fitness_fn'])
self.assertEqual(self.model_selector.n_parents, actual['n_parents'])
self.assertEqual(self.model_selector.n_evals, actual['n_evals'])
self.assertEqual(self.model_selector.additive_form, actual['additive_form'])
self.assertEqual(self.model_selector.optimizer, actual['optimizer'])
self.assertEqual(self.model_selector.n_restarts_optimizer, actual['n_restarts_optimizer'])
self.assertEqual(self.model_selector.standardize_x, actual['standardize_x'])
self.assertEqual(self.model_selector.standardize_y, actual['standardize_y'])
self.assertEqual(self.model_selector.total_eval_time, actual['total_eval_time'])
self.assertEqual(self.model_selector.total_expansion_time, actual['total_expansion_time'])
self.assertEqual(self.model_selector.total_model_search_time, actual['total_model_search_time'])
self.assertEqual(self.model_selector._gp_fn_name, actual['gp_fn_name'])
self.assertEqual(self.model_selector.name, actual['name'])
self.assertEqual(self.model_selector.built, actual['built'])
expected_selected_models = [m.to_dict() for m in self.model_selector.selected_models]
actual_selected_models = [m.to_dict() for m in actual['selected_models']]
self.assertEqual(expected_selected_models, actual_selected_models)
if not built:
self.assertEqual(self.model_selector._x_train_mean, actual['_x_train_mean'])
self.assertEqual(self.model_selector._x_train_std, actual['_x_train_std'])
self.assertEqual(self.model_selector._y_train_mean, actual['_y_train_mean'])
self.assertEqual(self.model_selector._y_train_std, actual['_y_train_std'])
else:
self.assertEqual(self.model_selector._x_train_mean.tolist(), actual['_x_train_mean'])
self.assertEqual(self.model_selector._x_train_std.tolist(), actual['_x_train_std'])
self.assertEqual(self.model_selector._y_train_mean.tolist(), actual['_y_train_mean'])
self.assertEqual(self.model_selector._y_train_std.tolist(), actual['_y_train_std'])
self.assertEqual(self.model_selector.initializer.to_dict(), actual['initializer'])
self.assertEqual(self.model_selector.n_init_trees, actual['n_init_trees'])
self.assertEqual(self.model_selector.max_offspring, actual['max_offspring'])
self.assertEqual(self.model_selector.fitness_sharing, actual['fitness_sharing'])
    def test_from_dict_unbuilt(self):
        """Round-trip the model selector through to_dict()/from_dict().

        NOTE(review): despite the "_unbuilt" suffix, sub-tests exercise BOTH
        the unbuilt and built (data-prepared) states, and deserialization is
        driven through both the concrete ModelSelector class and the generic
        Serializable entry point — consider renaming, but keep the name for
        test-discovery stability.
        """
        test_cases_built = (False, True)
        for built in test_cases_built:
            with self.subTest(built=built):
                test_cases_cls = (ModelSelector, Serializable)
                for cls in test_cases_cls:
                    with self.subTest(cls=cls):
                        if built:
                            # Standardization statistics (means/stds) only
                            # exist after the training data is prepared.
                            self.model_selector._prepare_data(self.x_train, self.y_train)
                        actual = cls.from_dict(self.model_selector.to_dict())
                        self.assertIsInstance(actual, EvolutionaryModelSelector)
                        # Grammar is compared by class name only — full value
                        # equality is presumably not defined for it.
                        self.assertEqual(self.model_selector.grammar.__class__.__name__,
                                         actual.grammar.__class__.__name__)
                        self.assertEqual(self.model_selector.fitness_fn_name, actual.fitness_fn_name)
                        self.assertEqual(self.model_selector.fitness_fn, actual.fitness_fn)
                        self.assertEqual(self.model_selector.n_parents, actual.n_parents)
                        self.assertEqual(self.model_selector.n_evals, actual.n_evals)
                        self.assertEqual(self.model_selector.additive_form, actual.additive_form)
                        self.assertEqual(self.model_selector.optimizer, actual.optimizer)
                        self.assertEqual(self.model_selector.n_restarts_optimizer, actual.n_restarts_optimizer)
                        self.assertEqual(self.model_selector.standardize_x, actual.standardize_x)
                        self.assertEqual(self.model_selector.standardize_y, actual.standardize_y)
                        self.assertEqual(self.model_selector.total_eval_time, actual.total_eval_time)
                        self.assertEqual(self.model_selector.total_expansion_time, actual.total_expansion_time)
                        self.assertEqual(self.model_selector.total_model_search_time, actual.total_model_search_time)
                        self.assertEqual(self.model_selector._gp_fn_name, actual._gp_fn_name)
                        self.assertEqual(self.model_selector._gp_args, actual._gp_args)
                        self.assertEqual(self.model_selector.name, actual.name)
                        self.assertEqual(self.model_selector.built, actual.built)
                        self.assertEqual(self.model_selector.selected_models, actual.selected_models)
                        if not built:
                            # Before building, the train statistics are unset.
                            self.assertIsNone(actual._x_train_mean)
                            self.assertIsNone(actual._x_train_std)
                            self.assertIsNone(actual._y_train_mean)
                            self.assertIsNone(actual._y_train_std)
                        else:
                            # After building they are arrays; compare as lists.
                            self.assertEqual(self.model_selector._x_train_mean.tolist(), actual._x_train_mean.tolist())
                            self.assertEqual(self.model_selector._x_train_std.tolist(), actual._x_train_std.tolist())
                            self.assertEqual(self.model_selector._y_train_mean.tolist(), actual._y_train_mean.tolist())
                            self.assertEqual(self.model_selector._y_train_std.tolist(), actual._y_train_std.tolist())
                        # Initializer, like grammar, is compared by class name.
                        self.assertEqual(self.model_selector.initializer.__class__.__name__,
                                         actual.initializer.__class__.__name__)
                        self.assertEqual(self.model_selector.n_init_trees, actual.n_init_trees)
                        self.assertEqual(self.model_selector.max_offspring, actual.max_offspring)
                        self.assertEqual(self.model_selector.fitness_sharing, actual.fitness_sharing)
| 59.362832
| 119
| 0.633075
| 2,240
| 20,124
| 5.358929
| 0.067857
| 0.127791
| 0.158614
| 0.195935
| 0.883456
| 0.861213
| 0.845052
| 0.834805
| 0.832139
| 0.832139
| 0
| 0.007658
| 0.266796
| 20,124
| 338
| 120
| 59.538462
| 0.805896
| 0
| 0
| 0.779661
| 0
| 0
| 0.062115
| 0.007056
| 0
| 0
| 0
| 0
| 0.566102
| 1
| 0.033898
| false
| 0
| 0.044068
| 0
| 0.091525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1d2e8bacf598c5ef7dc1dbc1525653afba277f36
| 160
|
py
|
Python
|
bpl_lib/crypto/__init__.py
|
DuneRoot/BPL-python
|
3ac1026cfc01ca5a71515caa5e352e4517cba0cc
|
[
"MIT"
] | null | null | null |
bpl_lib/crypto/__init__.py
|
DuneRoot/BPL-python
|
3ac1026cfc01ca5a71515caa5e352e4517cba0cc
|
[
"MIT"
] | null | null | null |
bpl_lib/crypto/__init__.py
|
DuneRoot/BPL-python
|
3ac1026cfc01ca5a71515caa5e352e4517cba0cc
|
[
"MIT"
] | null | null | null |
from bpl_lib.crypto.Crypto import ripemd160, sha1, sha256, hash160, hash256
from bpl_lib.crypto.Keys import Keys
from bpl_lib.crypto.Signature import Signature
| 40
| 75
| 0.8375
| 25
| 160
| 5.24
| 0.48
| 0.160305
| 0.229008
| 0.366412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090278
| 0.1
| 160
| 3
| 76
| 53.333333
| 0.819444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d51a1f4e3af0a7a883b889d35ca41adb47957e08
| 94
|
py
|
Python
|
netdev/utils/__init__.py
|
AudreyBeard/netdev
|
156d49af5e911b454596bad590de1825d4415753
|
[
"MIT"
] | 2
|
2019-10-02T14:17:43.000Z
|
2020-01-31T01:05:07.000Z
|
netdev/utils/__init__.py
|
AudreyBeard/netdev
|
156d49af5e911b454596bad590de1825d4415753
|
[
"MIT"
] | null | null | null |
netdev/utils/__init__.py
|
AudreyBeard/netdev
|
156d49af5e911b454596bad590de1825d4415753
|
[
"MIT"
] | null | null | null |
# flake8: NOQA
from .clf_utils import *
from .general_utils import *
from .net_utils import *
| 18.8
| 28
| 0.755319
| 14
| 94
| 4.857143
| 0.571429
| 0.485294
| 0.441176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.159574
| 94
| 4
| 29
| 23.5
| 0.848101
| 0.12766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d52b6f1148279a2824aa3176a07a77337529f07d
| 6,198
|
py
|
Python
|
src/calculate_variable_1d.py
|
bdrummond1/um_post_proc
|
2dc1dcaa164772e09e77cd3f3e7d927f2237228a
|
[
"MIT"
] | 1
|
2020-04-23T17:06:40.000Z
|
2020-04-23T17:06:40.000Z
|
src/calculate_variable_1d.py
|
bdrummond1/um_post_proc
|
2dc1dcaa164772e09e77cd3f3e7d927f2237228a
|
[
"MIT"
] | null | null | null |
src/calculate_variable_1d.py
|
bdrummond1/um_post_proc
|
2dc1dcaa164772e09e77cd3f3e7d927f2237228a
|
[
"MIT"
] | null | null | null |
# Module to calculate variable (1D version)
# Looks for requested variable, reads in necessary data and calculates
from construct_variable import *
from constant_user import *
# ---------------------------------------------
# Main function to calculate requested variable
# ---------------------------------------------
def calculate_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,lat_min,lat_max,
lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband):
if varname=='temp':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='ch4_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='h2o_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='co_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='co2_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='hcn_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='n2_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='nh3_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='oh_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='h_mole_fraction':
if verbose:
read_message(varname)
y, var = construct_variable_1d(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='u_timescale':
if verbose:
read_message(varname)
y, var = get_u_timescale(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='v_timescale':
if verbose:
read_message(varname)
y, var = get_v_timescale(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
elif varname=='w_timescale':
if verbose:
read_message(varname)
y, var = get_w_timescale(fname,fname_keys,fname_spec,varname,time_1,time_2,
lat_min,lat_max,lon_min,lon_max,plot_type,pressure_grid,vardim,instrument,nband)
else:
print 'Error: calculate_variable_1d'
print ' variable not implemented: ',varname
exit()
return y, var
# ---------------------------------------------
# Function to calculate zonal dynamical timescale [s]
# Requires user constants (in constant_user.py): planet radius
# ---------------------------------------------
def get_u_timescale(fname, fname_keys, fname_spec, varname, time_1, time_2, lon_request, lat_min, lat_max,
                    level, plot_type, pressure_grid, vardim, instrument, nband):
    """Return coordinate array and zonal dynamical timescale [s].

    Computed as 2*pi*Rp / |u|, where the planet radius Rp comes from
    constant_user.py.
    """
    # Zonal wind velocity profile from the model output.
    coord, zonal_wind = construct_variable_1d(fname, fname_keys, fname_spec, 'u', time_1, time_2,
                                              lon_request, lat_min, lat_max, level, plot_type,
                                              pressure_grid, vardim, instrument, nband)
    # Same evaluation order as the original expression.
    timescale = 2. * pi * Rp / abs(zonal_wind)
    return coord, timescale
# ---------------------------------------------
# Function to calculate meridional dynamical timescale [s]
# Requires user constants (in constant_user.py): planet radius
# ---------------------------------------------
def get_v_timescale(fname, fname_keys, fname_spec, varname, time_1, time_2, lon_request, lat_min, lat_max,
                    level, plot_type, pressure_grid, vardim, instrument, nband):
    """Return coordinate array and meridional dynamical timescale [s].

    Computed as pi*Rp / |v| / 2, where the planet radius Rp comes from
    constant_user.py.
    """
    # Meridional wind velocity profile from the model output.
    coord, merid_wind = construct_variable_1d(fname, fname_keys, fname_spec, 'v', time_1, time_2,
                                              lon_request, lat_min, lat_max, level, plot_type,
                                              pressure_grid, vardim, instrument, nband)
    # Same evaluation order as the original expression.
    timescale = pi * Rp / abs(merid_wind) / 2.
    return coord, timescale
# ---------------------------------------------
# Function to calculate vertical dynamical timescale [s]
# Requires user constants (in constant_user.py): surface gravity, mean molecular mass
# ---------------------------------------------
def get_w_timescale(fname, fname_keys, fname_spec, varname, time_1, time_2, lon_request, lat_min, lat_max,
                    level, plot_type, pressure_grid, vardim, instrument, nband):
    """Return coordinate array and vertical dynamical timescale [s].

    Computed as scale height over |w|, using surface gravity and mean
    molecular mass from constant_user.py.
    """
    # Vertical wind velocity profile.
    coord, vertical_wind = construct_variable_1d(fname, fname_keys, fname_spec, 'w', time_1, time_2,
                                                 lon_request, lat_min, lat_max, level, plot_type,
                                                 pressure_grid, vardim, instrument, nband)
    # Temperature profile (same coordinate, overwritten as in the original).
    coord, temperature = construct_variable_1d(fname, fname_keys, fname_spec, 'temp', time_1, time_2,
                                               lon_request, lat_min, lat_max, level, plot_type,
                                               pressure_grid, vardim, instrument, nband)
    # Pressure scale height H = kb*T / (mu*amu*g).
    scale_height = kb * temperature / (mu * amu * surf_gravity)
    timescale = scale_height / abs(vertical_wind)
    return coord, timescale
def read_message(varname):
    # Diagnostic banner printed before reading a variable, only when the
    # star-imported `verbose` flag is enabled (Python 2 print statements).
    print 'Routine: calculate_variable_1d'
    print '         requested variable is: ',varname
| 39.987097
| 111
| 0.723782
| 909
| 6,198
| 4.616062
| 0.114411
| 0.050048
| 0.070067
| 0.095091
| 0.851525
| 0.851525
| 0.837703
| 0.837703
| 0.817684
| 0.789085
| 0
| 0.012339
| 0.123911
| 6,198
| 154
| 112
| 40.246753
| 0.760405
| 0.17312
| 0
| 0.602041
| 0
| 0
| 0.059423
| 0.008237
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.020408
| null | null | 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d562001beec9d25268c58e1e847d8bcfb896d53f
| 5,384
|
py
|
Python
|
projects/vdk-plugins/vdk-impala/tests/functional/jobs/load_fact_snapshot_template_job/01_prepare_input_data.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 100
|
2021-10-04T09:32:04.000Z
|
2022-03-30T11:23:53.000Z
|
projects/vdk-plugins/vdk-impala/tests/functional/jobs/load_fact_snapshot_template_job/01_prepare_input_data.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 208
|
2021-10-04T16:56:40.000Z
|
2022-03-31T10:41:44.000Z
|
projects/vdk-plugins/vdk-impala/tests/functional/jobs/load_fact_snapshot_template_job/01_prepare_input_data.py
|
alod83/versatile-data-kit
|
9ca672d3929eb3dc6fe5c677e8c8a75e2a0d2be8
|
[
"Apache-2.0"
] | 14
|
2021-10-11T14:15:13.000Z
|
2022-03-11T13:39:17.000Z
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
from vdk.api.job_input import IJobInput
__author__ = "VMware, Inc."
__copyright__ = (
"Copyright 2019 VMware, Inc. All rights reserved. -- VMware Confidential"
)
def run(job_input: IJobInput) -> None:
    """Seed the Impala tables used by the load-fact-snapshot template test.

    Creates and populates three Parquet tables with the same column layout:
      * `{target_schema}`.`{target_table}` -- the current fact-table state
      * `{source_schema}`.`{source_view}`  -- the next snapshot to load
      * `{expect_schema}`.`{expect_table}` -- the state expected after the
        template merges the snapshot into the current state

    NOTE: the `{...}` tokens are template placeholders substituted by the
    framework before execution — they are NOT Python format fields, so the
    SQL text must be passed through verbatim.
    """
    # Step 1: create a table that represents the current state
    # job_input.execute_query(u'''
    #     DROP TABLE IF EXISTS `{target_schema}`.`{target_table}`
    # ''')
    job_input.execute_query(
        """
        CREATE TABLE IF NOT EXISTS `{target_schema}`.`{target_table}` (
            `dim_sddc_sk` STRING,
            `dim_org_id` INT,
            `dim_date_id` TIMESTAMP,
            `host_count` BIGINT,
            `cluster_count` BIGINT,
            `{last_arrival_ts}` TIMESTAMP
        ) STORED AS PARQUET
        """
    )
    job_input.execute_query(
        """
        INSERT OVERWRITE TABLE `{target_schema}`.`{target_table}` VALUES (
            -- 2019-11-18
            ("sddc01-r01", 1, "2019-11-18", 5 , 1, "2019-11-18 09:00:00"),
            ("sddc02-r01", 2, "2019-11-18", 4 , 1, "2019-11-18 09:00:00"),
            ("sddc03-r01", 3, "2019-11-18", 12, 3, "2019-11-18 09:00:00"),
            ("sddc04-r01", 4, "2019-11-18", 4 , 1, "2019-11-18 09:00:00"),
            -- 2019-11-19
            ("sddc01-r01", 1, "2019-11-19", 5 , 1, "2019-11-19 09:00:00"),
            ("sddc02-r01", 2, "2019-11-19", 4 , 1, "2019-11-19 09:00:00"),
            ("sddc03-r01", 3, "2019-11-19", 13, 3, "2019-11-19 09:00:00"),
            ("sddc04-r01", 4, "2019-11-19", 3 , 1, "2019-11-19 09:00:00"),
            ("sddc05-r02", 5, "2019-11-19", 20, 4, "2019-11-19 09:00:00")
        )
        """
    )
    # Step 2: create a table that represents the next snapshot
    # job_input.execute_query(u'''
    #     DROP TABLE IF EXISTS `{source_schema}`.`{source_view}`
    # ''')
    job_input.execute_query(
        """
        CREATE TABLE IF NOT EXISTS `{source_schema}`.`{source_view}` (
            `dim_sddc_sk` STRING,
            `dim_org_id` INT,
            `dim_date_id` TIMESTAMP,
            `host_count` BIGINT,
            `cluster_count` BIGINT,
            `{last_arrival_ts}` TIMESTAMP
        ) STORED AS PARQUET
        """
    )
    # The snapshot mixes a late arrival, exact duplicates, one changed row and
    # brand-new rows — the merge cases the template under test must handle.
    job_input.execute_query(
        """
        INSERT OVERWRITE TABLE `{source_schema}`.`{source_view}` VALUES (
            -- 2019-11-18
            ("sddc05-r01", 5, "2019-11-18", 18, 4, "2019-11-18 09:30:00"), -- late arrival
            -- 2019-11-19 (duplicated)
            ("sddc01-r01", 1, "2019-11-19", 5 , 1, "2019-11-19 09:00:00"), -- duplicated
            ("sddc02-r01", 2, "2019-11-19", 4 , 1, "2019-11-19 09:00:00"), -- duplicated
            ("sddc03-r01", 3, "2019-11-19", 13, 3, "2019-11-19 09:00:00"), -- duplicated
            ("sddc04-r01", 4, "2019-11-19", 3 , 1, "2019-11-19 09:00:00"), -- duplicated
            ("sddc05-r02", 5, "2019-11-19", 20, 5, "2019-11-19 09:00:00"), -- changed
            -- 2019-11-20
            ("sddc01-r01", 1, "2019-11-20", 10, 2, "2019-11-20 09:00:00"), -- new
            ("sddc02-r02", 2, "2019-11-20", 7 , 1, "2019-11-20 09:00:00"), -- new
            ("sddc03-r01", 3, "2019-11-20", 13, 3, "2019-11-20 09:00:00"), -- new
            ("sddc04-r01", 4, "2019-11-20", 3 , 1, "2019-11-20 09:00:00"), -- new
            ("sddc05-r04", 5, "2019-11-20", 3 , 1, "2019-11-20 09:00:00"), -- new
            ("sddc06-r01", 1, "2019-11-20", 3 , 1, "2019-11-20 09:00:00") -- new
        )
        """
    )
    # Step 3: Create a table containing the state expected after updating the current state with the next snapshot
    # job_input.execute_query(u'''
    #     DROP TABLE IF EXISTS `{expect_schema}`.`{expect_table}`
    # ''')
    job_input.execute_query(
        """
        CREATE TABLE IF NOT EXISTS `{expect_schema}`.`{expect_table}` (
            `dim_sddc_sk` STRING,
            `dim_org_id` INT,
            `dim_date_id` TIMESTAMP,
            `host_count` BIGINT,
            `cluster_count` BIGINT,
            `{last_arrival_ts}` TIMESTAMP
        ) STORED AS PARQUET
        """
    )
    job_input.execute_query(
        """
        INSERT OVERWRITE TABLE `{expect_schema}`.`{expect_table}` VALUES (
            -- 2019-11-18
            ("sddc01-r01", 1, "2019-11-18", 5 , 1, "2019-11-18 09:00:00"),
            ("sddc02-r01", 2, "2019-11-18", 4 , 1, "2019-11-18 09:00:00"),
            ("sddc03-r01", 3, "2019-11-18", 12, 3, "2019-11-18 09:00:00"),
            ("sddc04-r01", 4, "2019-11-18", 4 , 1, "2019-11-18 09:00:00"),
            ("sddc05-r01", 5, "2019-11-18", 18, 4, "2019-11-18 09:30:00"),
            -- 2019-11-19 (duplicated)
            ("sddc01-r01", 1, "2019-11-19", 5 , 1, "2019-11-19 09:00:00"),
            ("sddc02-r01", 2, "2019-11-19", 4 , 1, "2019-11-19 09:00:00"),
            ("sddc03-r01", 3, "2019-11-19", 13, 3, "2019-11-19 09:00:00"),
            ("sddc04-r01", 4, "2019-11-19", 3 , 1, "2019-11-19 09:00:00"),
            ("sddc05-r02", 5, "2019-11-19", 20, 5, "2019-11-19 09:00:00"),
            -- 2019-11-20
            ("sddc01-r01", 1, "2019-11-20", 10, 2, "2019-11-20 09:00:00"),
            ("sddc02-r02", 2, "2019-11-20", 7 , 1, "2019-11-20 09:00:00"),
            ("sddc03-r01", 3, "2019-11-20", 13, 3, "2019-11-20 09:00:00"),
            ("sddc04-r01", 4, "2019-11-20", 3 , 1, "2019-11-20 09:00:00"),
            ("sddc05-r04", 5, "2019-11-20", 3 , 1, "2019-11-20 09:00:00"),
            ("sddc06-r01", 1, "2019-11-20", 3 , 1, "2019-11-20 09:00:00")
        )
        """
    )
| 41.736434
| 114
| 0.521174
| 820
| 5,384
| 3.329268
| 0.126829
| 0.18022
| 0.076923
| 0.054945
| 0.860073
| 0.815385
| 0.79011
| 0.784249
| 0.772161
| 0.742857
| 0
| 0.29709
| 0.272288
| 5,384
| 128
| 115
| 42.0625
| 0.399694
| 0.105312
| 0
| 0.333333
| 0
| 0
| 0.155268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
63a53f930cbcb2d2fa81fe96c83add8bca7966b8
| 163
|
py
|
Python
|
models/face_recognition/utils/__init__.py
|
JasonZuu/Frame-Selection
|
3eb6ecdbf8e5695ba53752bdd8446def9c5cfbb9
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T03:11:24.000Z
|
2022-03-29T03:11:24.000Z
|
models/face_recognition/utils/__init__.py
|
JasonZuu/Frame-Selection
|
3eb6ecdbf8e5695ba53752bdd8446def9c5cfbb9
|
[
"BSD-3-Clause"
] | null | null | null |
models/face_recognition/utils/__init__.py
|
JasonZuu/Frame-Selection
|
3eb6ecdbf8e5695ba53752bdd8446def9c5cfbb9
|
[
"BSD-3-Clause"
] | null | null | null |
from utils.face_features import *
from utils.cost_time import *
from utils.pull_faces import *
from utils.matrix_process import *
from utils.draw_boxs import *
| 32.6
| 35
| 0.797546
| 25
| 163
| 5
| 0.52
| 0.36
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141104
| 163
| 5
| 36
| 32.6
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
89a9f269a10db26749bc0a3b0dfc4aebfc8c6ae0
| 2,909
|
py
|
Python
|
pyaz/netappfiles/snapshot/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/netappfiles/snapshot/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/netappfiles/snapshot/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
Manage Azure NetApp Files (ANF) Snapshot Resources.
'''
from ... pyaz_utils import _call_az
from . import policy
def show(account_name, name, pool_name, resource_group, volume_name):
    """Get the specified ANF snapshot.

    Required Parameters:
    - account_name -- Name of the ANF account.
    - name -- The name of the ANF snapshot
    - pool_name -- Name of the ANF pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - volume_name -- Name of the ANF volume.
    """
    # locals() is snapshotted before any other local is bound, so the mapping
    # holds exactly the CLI arguments for the underlying `az` invocation.
    cli_args = locals()
    return _call_az("az netappfiles snapshot show", cli_args)
def list(account_name, pool_name, resource_group, volume_name):
    """List the snapshots of an ANF volume.

    Required Parameters:
    - account_name -- Name of the ANF account.
    - pool_name -- Name of the ANF pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - volume_name -- The name of the ANF volume
    """
    # NOTE: the name shadows the `list` builtin, but it mirrors the `az` CLI
    # verb and is part of this module's public interface — keep it.
    cli_args = locals()
    return _call_az("az netappfiles snapshot list", cli_args)
def delete(account_name, name, pool_name, resource_group, volume_name):
    """Delete the specified ANF snapshot.

    Required Parameters:
    - account_name -- Name of the ANF account.
    - name -- The name of the ANF snapshot
    - pool_name -- Name of the ANF pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - volume_name -- Name of the ANF volume.
    """
    # Capture the parameters before binding anything else locally.
    cli_args = locals()
    return _call_az("az netappfiles snapshot delete", cli_args)
def create(account_name, location, name, pool_name, resource_group, volume_name):
    """Create a new Azure NetApp Files (ANF) snapshot.

    Required Parameters:
    - account_name -- Name of the ANF account.
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - name -- The name of the ANF snapshot
    - pool_name -- Name of the ANF pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - volume_name -- Name of the ANF volume.
    """
    # Capture the parameters before binding anything else locally.
    cli_args = locals()
    return _call_az("az netappfiles snapshot create", cli_args)
def update(account_name, body, name, pool_name, resource_group, volume_name):
    """Update the specified ANF snapshot.

    Required Parameters:
    - account_name -- Name of the ANF account.
    - body -- Snapshot object supplied in the body of the operation.
    - name -- The name of the ANF snapshot
    - pool_name -- Name of the ANF pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - volume_name -- Name of the ANF volume.
    """
    # Capture the parameters before binding anything else locally.
    cli_args = locals()
    return _call_az("az netappfiles snapshot update", cli_args)
| 37.294872
| 161
| 0.69199
| 401
| 2,909
| 4.887781
| 0.13217
| 0.073469
| 0.087245
| 0.116327
| 0.802551
| 0.767857
| 0.766327
| 0.712755
| 0.712755
| 0.616837
| 0
| 0
| 0.214507
| 2,909
| 77
| 162
| 37.779221
| 0.857768
| 0.661396
| 0
| 0
| 0
| 0
| 0.188387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0.166667
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
982d78650b3ba9ccf4dbcbe6a9c2820d2feb0542
| 1,758
|
py
|
Python
|
main.py
|
kotsky/ai_dev
|
9222a98dd11329ef65124d23249b3f62eff7925d
|
[
"MIT"
] | null | null | null |
main.py
|
kotsky/ai_dev
|
9222a98dd11329ef65124d23249b3f62eff7925d
|
[
"MIT"
] | null | null | null |
main.py
|
kotsky/ai_dev
|
9222a98dd11329ef65124d23249b3f62eff7925d
|
[
"MIT"
] | null | null | null |
"""Regression
Follow jupyter notebook workflow for better
understanding of how to apply data_reader and
Regression model to train your AI.
https://kotsky.github.io/projects/ai_from_scratch/regression_workflow.html
Jupiter notebook: https://github.com/kotsky/ai-dev/blob/main/regression_workflow.ipynb
Package: https://github.com/kotsky/ai-dev/blob/main/regression/regression.py
"""
"""Logistic Regression
Follow jupyter notebook workflow for better
understanding of how to apply data_reader and
Logistic Regression model to train your AI.
Link: https://kotsky.github.io/projects/ai_from_scratch/logistic_regression_workflow.html
Jupiter notebook: https://github.com/kotsky/ai-dev/blob/main/logistic_regression_workflow.ipynb
Package: https://github.com/kotsky/ai-dev/blob/main/classification/logistic_regression.py
https://kotsky.github.io/projects/ai_from_scratch/kmean_workflow.html
"""
"""K-Nearest Neighbors
Follow jupyter notebook workflow for better
understanding of how to apply data_reader and
K-Nearest Neighbors model to train your AI.
Link: https://kotsky.github.io/projects/ai_from_scratch/knn_workflow.html
Jupiter notebook: https://github.com/kotsky/ai-dev/blob/main/knn_workflow.ipynb
Package: https://github.com/kotsky/ai-dev/blob/main/classification/knn.py
"""
"""K-Mean
Follow jupyter notebook workflow for better
understanding of how to apply data_reader and
K-Mean model to train your AI.
Link: https://kotsky.github.io/projects/ai_from_scratch/kmean_workflow.html
Jupiter notebook: https://github.com/kotsky/ai-dev/blob/main/kmean_workflow.ipynb
Package: https://github.com/kotsky/ai-dev/blob/main/clusterization/kmean.py
"""
if __name__ == '__main__':
    # Deliberately empty entry point: this module only indexes the notebook
    # workflows referenced in the string blocks above.
    pass
| 32.555556
| 96
| 0.781001
| 257
| 1,758
| 5.210117
| 0.18677
| 0.065721
| 0.083645
| 0.119492
| 0.882748
| 0.882748
| 0.854369
| 0.854369
| 0.824496
| 0.817028
| 0
| 0
| 0.113766
| 1,758
| 53
| 97
| 33.169811
| 0.859435
| 0.214448
| 0
| 0
| 0
| 0
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
988ccbb386499f98bee9ef72da83a364d7fae41e
| 9,816
|
py
|
Python
|
cajas/movement/migrations/0001_initial.py
|
dmontoya1/cajas
|
5eb3d5835250d5dafae398082200b79c1ca8063b
|
[
"MIT"
] | null | null | null |
cajas/movement/migrations/0001_initial.py
|
dmontoya1/cajas
|
5eb3d5835250d5dafae398082200b79c1ca8063b
|
[
"MIT"
] | null | null | null |
cajas/movement/migrations/0001_initial.py
|
dmontoya1/cajas
|
5eb3d5835250d5dafae398082200b79c1ca8063b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.9 on 2019-04-08 16:14
from django.db import migrations, models
import django.db.models.deletion
def _movement_base_fields():
    """Build the field definitions shared by every movement model.

    Django model fields are stateful objects, so each ``CreateModel`` needs
    fresh instances; this helper returns a new list of new fields per call.
    """
    return [
        ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
        ('movement_type', models.CharField(choices=[('IN', 'Entra'), ('OUT', 'Sale')], max_length=10, verbose_name='Tipo de movimiento')),
        ('value', models.IntegerField(default=0, verbose_name='Valor')),
        ('detail', models.TextField(blank=True, null=True, verbose_name='Detalle')),
        ('date', models.DateField(verbose_name='Fecha')),
        ('ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='Dirección IP responsable')),
        ('balance', models.IntegerField(default=0, verbose_name='Saldo')),
    ]


class Migration(migrations.Migration):
    """Initial migration for the movement app.

    Creates eight movement models that share a common base-field set
    (see ``_movement_base_fields``) plus model-specific extra fields.
    The resulting operations are identical to the generated original.
    """

    initial = True

    dependencies = [
        ('concepts', '0001_initial'),
        ('boxes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='MovementDailySquare',
            fields=_movement_base_fields() + [
                ('review', models.BooleanField(default=False, verbose_name='Movimiento Revisado?')),
                ('status', models.CharField(blank=True, choices=[('AP', 'Aprobado'), ('DE', 'Rechazado'), ('DI', 'Dispersado')], max_length=2, null=True, verbose_name='Estado de la revisión')),
                ('denied_detail', models.TextField(blank=True, null=True, verbose_name='Detalle del rechazo del movimiento')),
            ],
            options={
                'verbose_name': 'Movimiento del Cuadre Diario',
                'verbose_name_plural': 'Movimientos del Cuadre Diario',
            },
        ),
        migrations.CreateModel(
            name='MovementDonJuan',
            fields=_movement_base_fields(),
            options={
                'verbose_name': 'Movimiento de Don Juan',
                'verbose_name_plural': 'Movimientos de Don Juan',
            },
        ),
        migrations.CreateModel(
            name='MovementDonJuanUsd',
            fields=_movement_base_fields(),
            # NOTE(review): verbose names duplicate MovementDonJuan's — kept
            # byte-identical to the original migration; confirm intent.
            options={
                'verbose_name': 'Movimiento de Don Juan',
                'verbose_name_plural': 'Movimientos de Don Juan',
            },
        ),
        migrations.CreateModel(
            name='MovementOffice',
            fields=_movement_base_fields(),
            options={
                'verbose_name': 'Movimiento de la oficina',
                'verbose_name_plural': 'Movimientos de la oficina',
            },
        ),
        migrations.CreateModel(
            name='MovementPartner',
            fields=_movement_base_fields(),
            options={
                'verbose_name': 'Movimiento del socio',
                'verbose_name_plural': 'Movimientos del socio',
            },
        ),
        migrations.CreateModel(
            name='MovementProvisioning',
            fields=_movement_base_fields(),
            options={
                'verbose_name': 'Movimiento de aprovisionamiento',
                'verbose_name_plural': 'Movimientos de aprovisionamiento',
            },
        ),
        migrations.CreateModel(
            name='MovementRequest',
            fields=_movement_base_fields() + [
                ('observation', models.TextField(help_text='Observación por la cual se debería de aceptar el movimiento que sobrepasó el tope', verbose_name='Observación')),
                ('withdraw_reason', models.TextField(blank=True, null=True, verbose_name='Razón de solicitud de permiso retiro de socio')),
            ],
            options={
                'verbose_name': 'Requerimiento de Movimiento',
                'verbose_name_plural': 'Requerimientos de movimientos',
            },
        ),
        migrations.CreateModel(
            name='MovementWithdraw',
            fields=_movement_base_fields() + [
                ('observation', models.TextField(help_text='Observación por la cual se debería de aceptar el movimiento', verbose_name='Observación')),
                ('withdraw_reason', models.TextField(blank=True, null=True, verbose_name='Razón de solicitud de permiso retiro de socio')),
                ('box_daily_square', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='related_withdraw', to='boxes.BoxDailySquare', verbose_name='Caja Cuadre Diario')),
                ('box_partner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='related_withdraw', to='boxes.BoxPartner', verbose_name='Caja Socio')),
                ('concept', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='concepts.Concept', verbose_name='Concepto')),
            ],
            options={
                'verbose_name': 'Requerimiento de retiro',
                'verbose_name_plural': 'Requerimientos de retiro',
            },
        ),
    ]
| 62.923077
| 220
| 0.597392
| 981
| 9,816
| 5.831804
| 0.141692
| 0.157665
| 0.049991
| 0.065373
| 0.831323
| 0.780982
| 0.780982
| 0.780982
| 0.780982
| 0.780982
| 0
| 0.00763
| 0.252343
| 9,816
| 155
| 221
| 63.329032
| 0.771904
| 0.004584
| 0
| 0.689189
| 1
| 0
| 0.241888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013514
| 0
| 0.040541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
98a9842ed2835f8ea3a93f0daef3dabf8de77272
| 2,962
|
py
|
Python
|
maro/cli/k8s/job.py
|
yangboz/maro
|
0973783e55ca07bf8e177910c9d47854117a4ea8
|
[
"MIT"
] | 598
|
2020-09-23T00:50:22.000Z
|
2022-03-31T08:12:54.000Z
|
maro/cli/k8s/job.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 235
|
2020-09-22T10:20:48.000Z
|
2022-03-31T02:10:03.000Z
|
maro/cli/k8s/job.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 116
|
2020-09-22T09:19:04.000Z
|
2022-02-12T05:04:07.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.cli.utils.details_validity_wrapper import check_details_validity
from maro.cli.utils.operation_lock_wrapper import operation_lock
def _get_aks_executor(cluster_name: str):
    """Load the cluster's details and return a ``K8sAksExecutor`` for it.

    Shared by all job subcommands below. Raises ``BadRequestError`` when the
    cluster's mode is anything other than ``"k8s/aks"``.
    """
    # Late imports (kept from the original commands) so the CLI starts fast.
    from maro.cli.k8s.executors.k8s_aks_executor import K8sAksExecutor
    from maro.cli.utils.details_reader import DetailsReader
    from maro.utils.exception.cli_exception import BadRequestError

    cluster_details = DetailsReader.load_cluster_details(cluster_name=cluster_name)
    if cluster_details["mode"] != "k8s/aks":
        raise BadRequestError(f"Unsupported operation in mode '{cluster_details['mode']}'.")
    return K8sAksExecutor(cluster_name=cluster_name)


@check_details_validity
@operation_lock
def start_job(cluster_name: str, deployment_path: str, **kwargs):
    """Start the job described by the deployment file on the named cluster."""
    _get_aks_executor(cluster_name).start_job(deployment_path=deployment_path)


@check_details_validity
@operation_lock
def stop_job(cluster_name: str, job_name: str, **kwargs):
    """Stop a running job on the named cluster."""
    _get_aks_executor(cluster_name).stop_job(job_name=job_name)


@check_details_validity
@operation_lock
def get_job_logs(cluster_name: str, job_name: str, **kwargs):
    """Fetch the logs of a job on the named cluster."""
    _get_aks_executor(cluster_name).get_job_logs(job_name=job_name)


@check_details_validity
@operation_lock
def list_job(cluster_name: str, **kwargs):
    """List all jobs on the named cluster."""
    _get_aks_executor(cluster_name).list_job()
| 35.686747
| 92
| 0.75287
| 365
| 2,962
| 5.852055
| 0.139726
| 0.102996
| 0.051498
| 0.082397
| 0.869382
| 0.858614
| 0.84176
| 0.84176
| 0.84176
| 0.84176
| 0
| 0.008055
| 0.161715
| 2,962
| 82
| 93
| 36.121951
| 0.852195
| 0.058069
| 0
| 0.740741
| 0
| 0
| 0.099316
| 0.040302
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.259259
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f54961fb9280016a8d516844754eef2a63704a0
| 8,318
|
py
|
Python
|
main/tests/test_comments.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | 30
|
2020-06-09T12:46:18.000Z
|
2022-02-28T23:50:22.000Z
|
main/tests/test_comments.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | 9
|
2020-06-11T15:59:00.000Z
|
2022-03-03T00:39:54.000Z
|
main/tests/test_comments.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | 5
|
2020-06-01T00:15:42.000Z
|
2021-07-02T12:46:43.000Z
|
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from main import models
class CommentFullCreateTestCase(TestCase):
    """Comment creation when both name and email are supplied."""

    def setUp(self):
        # comments_on=True enables commenting on the owner's posts.
        self.user = models.User.objects.create(username="alice", comments_on=True)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )

    def test_comment_create(self):
        data = {
            "name": "Jon",
            "email": "jon@wick.com",
            "body": "Content sentence.",
        }
        response = self.client.post(
            reverse("comment_create", args=(self.post.slug,)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
            data=data,
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(models.Comment.objects.count(), 1)
        # Fetch the created comment once instead of re-evaluating the
        # queryset for every attribute check.
        comment = models.Comment.objects.first()
        self.assertEqual(comment.name, data["name"])
        self.assertEqual(comment.email, data["email"])
        self.assertEqual(comment.body, data["body"])
        self.assertEqual(comment.post, self.post)
class CommentNameCreateTestCase(TestCase):
    """Comment creation with a name but no email."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice", comments_on=True)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )

    def test_comment_create(self):
        data = {
            "name": "Jon",
            "body": "Content sentence.",
        }
        response = self.client.post(
            reverse("comment_create", args=(self.post.slug,)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
            data=data,
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(models.Comment.objects.count(), 1)
        # Single fetch instead of one queryset evaluation per assertion.
        comment = models.Comment.objects.first()
        self.assertEqual(comment.name, data["name"])
        self.assertEqual(comment.body, data["body"])
        self.assertEqual(comment.post, self.post)
class CommentEmailCreateTestCase(TestCase):
    """Comment creation with an email but no name falls back to "Anonymous"."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice", comments_on=True)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )

    def test_comment_create(self):
        data = {
            "email": "jon@wick.com",
            "body": "Content sentence.",
        }
        response = self.client.post(
            reverse("comment_create", args=(self.post.slug,)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
            data=data,
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(models.Comment.objects.count(), 1)
        # Single fetch instead of one queryset evaluation per assertion.
        comment = models.Comment.objects.first()
        self.assertEqual(comment.name, "Anonymous")
        self.assertEqual(comment.email, data["email"])
        self.assertEqual(comment.body, data["body"])
        self.assertEqual(comment.post, self.post)
class CommentAnonCreateTestCase(TestCase):
    """Comment creation with neither name nor email falls back to "Anonymous"."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice", comments_on=True)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )

    def test_comment_create(self):
        data = {
            "body": "Content sentence.",
        }
        response = self.client.post(
            reverse("comment_create", args=(self.post.slug,)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
            data=data,
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(models.Comment.objects.count(), 1)
        # Single fetch instead of one queryset evaluation per assertion.
        comment = models.Comment.objects.first()
        self.assertEqual(comment.name, "Anonymous")
        self.assertEqual(comment.body, data["body"])
        self.assertEqual(comment.post, self.post)
class CommentNoBodyCreateTestCase(TestCase):
    """A comment with an empty body is rejected and nothing is persisted."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice", comments_on=True)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )

    def test_comment_create(self):
        data = {
            "body": "",
        }
        response = self.client.post(
            reverse("comment_create", args=(self.post.slug,)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
            data=data,
        )
        # 200 means the form was re-rendered with errors rather than saved.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(models.Comment.objects.count(), 0)
class CommentDisallowedCreateTestCase(TestCase):
    """No comment is created when the post owner has comments disabled."""

    def setUp(self):
        # user.comments_on=False is the default
        self.user = models.User.objects.create(username="alice")
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )

    def test_comment_create(self):
        data = {
            "body": "",
        }
        response = self.client.post(
            reverse("comment_create", args=(self.post.slug,)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
            data=data,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(models.Comment.objects.count(), 0)
class CommentDeleteTestCase(TestCase):
    """The post owner can delete a comment on their own post."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice", comments_on=True)
        # Log in as the post owner so the delete is authorized.
        self.client.force_login(self.user)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )
        self.comment = models.Comment.objects.create(
            name="Jon",
            email="jon@wick.com",
            body="Content sentence.",
            post=self.post,
        )

    def test_comment_delete(self):
        response = self.client.post(
            reverse("comment_delete", args=(self.post.slug, self.comment.id)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(models.Comment.objects.count(), 0)
class CommentNonOwnerDeleteTestCase(TestCase):
    """A logged-in user who does not own the post cannot delete its comments."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice", comments_on=True)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )
        self.comment = models.Comment.objects.create(
            name="Jon",
            email="jon@wick.com",
            body="Content sentence.",
            post=self.post,
        )
        # Authenticate as a different user than the post owner.
        self.non_owner = models.User.objects.create(username="bob")
        self.client.force_login(self.non_owner)

    def test_comment_delete(self):
        response = self.client.post(
            reverse("comment_delete", args=(self.post.slug, self.comment.id)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
        )
        # Forbidden; the comment must survive.
        self.assertEqual(response.status_code, 403)
        self.assertEqual(models.Comment.objects.count(), 1)
class CommentAnonDeleteTestCase(TestCase):
    """An unauthenticated visitor cannot delete comments."""

    def setUp(self):
        self.user = models.User.objects.create(username="alice", comments_on=True)
        self.post = models.Post.objects.create(
            title="Hello world",
            slug="hello-world",
            owner=self.user,
        )
        self.comment = models.Comment.objects.create(
            name="Jon",
            email="jon@wick.com",
            body="Content sentence.",
            post=self.post,
        )

    def test_comment_delete(self):
        response = self.client.post(
            reverse("comment_delete", args=(self.post.slug, self.comment.id)),
            HTTP_HOST="alice." + settings.CANONICAL_HOST,
        )
        # Forbidden; the comment must survive.
        self.assertEqual(response.status_code, 403)
        self.assertEqual(models.Comment.objects.count(), 1)
| 35.853448
| 83
| 0.598702
| 902
| 8,318
| 5.446785
| 0.084257
| 0.0977
| 0.105842
| 0.131081
| 0.913495
| 0.898229
| 0.898229
| 0.898229
| 0.889273
| 0.888052
| 0
| 0.005869
| 0.262563
| 8,318
| 231
| 84
| 36.008658
| 0.795077
| 0.004448
| 0
| 0.782178
| 0
| 0
| 0.086242
| 0
| 0
| 0
| 0
| 0
| 0.158416
| 1
| 0.089109
| false
| 0
| 0.019802
| 0
| 0.153465
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f6d5ce35b72c5d815f079106f3cb133d67e6f1be
| 5,623
|
py
|
Python
|
napari/tests/test_advanced.py
|
arokem/napari
|
e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd
|
[
"BSD-3-Clause"
] | null | null | null |
napari/tests/test_advanced.py
|
arokem/napari
|
e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd
|
[
"BSD-3-Clause"
] | 1
|
2019-09-18T22:59:55.000Z
|
2019-09-23T16:41:08.000Z
|
napari/tests/test_advanced.py
|
arokem/napari
|
e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from napari import Viewer
def test_4D_5D_images(qtbot):
    """Test adding 4D followed by 5D image layers to the viewer.

    Initially only 2 sliders should be present, then a third slider should be
    created.
    """
    np.random.seed(0)
    viewer = Viewer()
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)
    try:
        # add 4D image data
        data = np.random.random((2, 6, 30, 40))
        viewer.add_image(data)
        assert np.all(viewer.layers[0].data == data)
        assert len(viewer.layers) == 1
        assert viewer.dims.ndim == 4
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 2

        # now add 5D image data - check an extra slider has been created
        data = np.random.random((4, 4, 5, 30, 40))
        viewer.add_image(data)
        assert np.all(viewer.layers[1].data == data)
        assert len(viewer.layers) == 2
        assert viewer.dims.ndim == 5
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 3
    finally:
        # Close the viewer even when an assertion fails, so the Qt window
        # does not leak into subsequent tests.
        viewer.window.close()
def test_change_image_dims(qtbot):
    """Test changing the dims and shape of an image layer in place and checking
    the numbers of sliders and their ranges changes appropriately.
    """
    np.random.seed(0)
    viewer = Viewer()
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)
    try:
        # add 3D image data
        data = np.random.random((10, 30, 40))
        viewer.add_image(data)
        assert np.all(viewer.layers[0].data == data)
        assert len(viewer.layers) == 1
        assert viewer.dims.ndim == 3
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 1

        # switch number of displayed dimensions
        viewer.layers[0].data = data[0]
        assert np.all(viewer.layers[0].data == data[0])
        assert len(viewer.layers) == 1
        assert viewer.dims.ndim == 2
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 0

        # switch number of displayed dimensions
        viewer.layers[0].data = data[:6]
        assert np.all(viewer.layers[0].data == data[:6])
        assert len(viewer.layers) == 1
        assert viewer.dims.ndim == 3
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 1

        # change the shape of the data
        viewer.layers[0].data = data[:3]
        assert np.all(viewer.layers[0].data == data[:3])
        assert len(viewer.layers) == 1
        assert viewer.dims.ndim == 3
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 1
    finally:
        # Close the viewer even when an assertion fails, so the Qt window
        # does not leak into subsequent tests.
        viewer.window.close()
def test_range_one_image(qtbot):
    """Test adding an image with a range one dimensions.

    There should be no slider shown for the axis corresponding to the range
    one dimension.
    """
    # NOTE(review): the points section below duplicates
    # test_range_one_images_and_points verbatim — consider trimming one.
    np.random.seed(0)
    viewer = Viewer()
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)
    try:
        # add 5D image data with range one dimensions
        data = np.random.random((1, 1, 1, 100, 200))
        viewer.add_image(data)
        assert np.all(viewer.layers[0].data == data)
        assert len(viewer.layers) == 1
        assert viewer.dims.ndim == 5
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 0

        # now add 5D points data - check extra sliders have been created
        points = np.floor(5 * np.random.random((1000, 5))).astype(int)
        points[:, -2:] = 20 * points[:, -2:]
        viewer.add_points(points)
        assert np.all(viewer.layers[1].data == points)
        assert len(viewer.layers) == 2
        assert viewer.dims.ndim == 5
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 3
    finally:
        # Close the viewer even when an assertion fails, so the Qt window
        # does not leak into subsequent tests.
        viewer.window.close()
def test_range_one_images_and_points(qtbot):
    """Test adding images with range one dimensions and points.

    Initially no sliders should be present as the images have range one
    dimensions. On adding the points the sliders should be displayed.
    """
    np.random.seed(0)
    viewer = Viewer()
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)
    try:
        # add 5D image data with range one dimensions
        data = np.random.random((1, 1, 1, 100, 200))
        viewer.add_image(data)
        assert np.all(viewer.layers[0].data == data)
        assert len(viewer.layers) == 1
        assert viewer.dims.ndim == 5
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 0

        # now add 5D points data - check extra sliders have been created
        points = np.floor(5 * np.random.random((1000, 5))).astype(int)
        points[:, -2:] = 20 * points[:, -2:]
        viewer.add_points(points)
        assert np.all(viewer.layers[1].data == points)
        assert len(viewer.layers) == 2
        assert viewer.dims.ndim == 5
        assert view.dims.nsliders == viewer.dims.ndim
        assert np.sum(view.dims._displayed_sliders) == 3
    finally:
        # Close the viewer even when an assertion fails, so the Qt window
        # does not leak into subsequent tests.
        viewer.window.close()
def test_update_console(qtbot):
    """Test updating the console with local variables."""
    viewer = Viewer()
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)
    try:
        # Check viewer in console
        assert view.console.kernel_client is not None
        assert 'viewer' in view.console.shell.user_ns
        assert view.console.shell.user_ns['viewer'] == viewer

        a = 4
        b = 5
        viewer.update_console(locals())
        assert 'a' in view.console.shell.user_ns
        assert view.console.shell.user_ns['a'] == a
        assert 'b' in view.console.shell.user_ns
        assert view.console.shell.user_ns['b'] == b
    finally:
        # Close the viewer even when an assertion fails, so the Qt window
        # does not leak into subsequent tests.
        viewer.window.close()
| 32.316092
| 79
| 0.671528
| 840
| 5,623
| 4.428571
| 0.139286
| 0.074194
| 0.075269
| 0.045699
| 0.760484
| 0.760484
| 0.736559
| 0.727957
| 0.702151
| 0.69086
| 0
| 0.02784
| 0.207896
| 5,623
| 173
| 80
| 32.50289
| 0.807364
| 0.21003
| 0
| 0.727273
| 0
| 0
| 0.003676
| 0
| 0
| 0
| 0
| 0
| 0.518182
| 1
| 0.045455
| false
| 0
| 0.018182
| 0
| 0.063636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f6d7a4dd05c74deb0606e272b4544f46a9f9f35c
| 39,915
|
py
|
Python
|
swagger_client/api/campaigns_api.py
|
klaviyo/klaviyo-python
|
8f95cdaf1469711ab99ecfbfb64ce743451c490d
|
[
"MIT"
] | 10
|
2021-12-21T02:08:00.000Z
|
2022-02-24T05:37:20.000Z
|
swagger_client/api/campaigns_api.py
|
klaviyo/klaviyo-python
|
8f95cdaf1469711ab99ecfbfb64ce743451c490d
|
[
"MIT"
] | 3
|
2022-02-02T09:07:40.000Z
|
2022-03-04T15:31:11.000Z
|
swagger_client/api/campaigns_api.py
|
klaviyo/klaviyo-python
|
8f95cdaf1469711ab99ecfbfb64ce743451c490d
|
[
"MIT"
] | 2
|
2021-12-21T02:07:53.000Z
|
2022-02-22T08:05:41.000Z
|
# coding: utf-8
"""
Klaviyo API
Empowering creators to own their destiny # noqa: E501
OpenAPI spec version: 2022.03.29
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class CampaignsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.warned = []
    def cancel_campaign(self, campaign_id, **kwargs):  # noqa: E501
        """Cancel a Campaign  # noqa: E501

        Cancels a campaign send. Marks a campaign as cancelled regardless of its current status.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cancel_campaign(campaign_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str campaign_id: (required)
        :return: Campaign
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: callers get the response body only; the
        # *_with_http_info variant also exposes status code and headers.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Asynchronous path: return the request thread immediately.
            return self.cancel_campaign_with_http_info(campaign_id, **kwargs)  # noqa: E501
        else:
            # Synchronous path: block until the response body is available.
            (data) = self.cancel_campaign_with_http_info(campaign_id, **kwargs)  # noqa: E501
            return data
    def cancel_campaign_with_http_info(self, campaign_id, **kwargs):  # noqa: E501
        """Cancel a Campaign  # noqa: E501

        Cancels a campaign send. Marks a campaign as cancelled regardless of its current status.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.cancel_campaign_with_http_info(campaign_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str campaign_id: (required)
        :return: Campaign
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts, plus the client's standard
        # request-control options.
        all_params = ['campaign_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method cancel_campaign" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'campaign_id' is set
        if ('campaign_id' not in params or
                params['campaign_id'] is None):
            raise ValueError("Missing the required parameter `campaign_id` when calling `cancel_campaign`")  # noqa: E501

        collection_formats = {}

        # Substitute {campaign_id} into the URL path template below.
        path_params = {}
        if 'campaign_id' in params:
            path_params['campaign_id'] = params['campaign_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['ApiKeyAuth']  # noqa: E501

        return self.api_client.call_api(
            '/v1/campaign/{campaign_id}/cancel', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Campaign',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def clone_campaign(self, campaign_id, **kwargs):  # noqa: E501
        """Clone a Campaign  # noqa: E501

        Creates a copy of a campaign. The new campaign starts as a draft.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.clone_campaign(campaign_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str campaign_id: (required)
        :param str name:
        :param str list_id:
        :return: Campaign
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: callers get the response body only; the
        # *_with_http_info variant also exposes status code and headers.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Asynchronous path: return the request thread immediately.
            return self.clone_campaign_with_http_info(campaign_id, **kwargs)  # noqa: E501
        else:
            # Synchronous path: block until the response body is available.
            (data) = self.clone_campaign_with_http_info(campaign_id, **kwargs)  # noqa: E501
            return data
def clone_campaign_with_http_info(self, campaign_id, **kwargs): # noqa: E501
"""Clone a Campaign # noqa: E501
Creates a copy of a campaign. The new campaign starts as a draft. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.clone_campaign_with_http_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:param str name:
:param str list_id:
:return: Campaign
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id', 'name', 'list_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method clone_campaign" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `clone_campaign`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'name' in params:
form_params.append(('name', params['name'])) # noqa: E501
if 'list_id' in params:
form_params.append(('list_id', params['list_id'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaign/{campaign_id}/clone', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Campaign', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_campaign(self, **kwargs): # noqa: E501
"""Create New Campaign # noqa: E501
Creates a new campaign. The created campaign is a draft and is not automatically sent. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_campaign(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id:
:param str template_id:
:param str from_email:
:param str from_name:
:param str subject:
:param str name:
:param bool use_smart_sending:
:param bool add_google_analytics:
:return: Campaign
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_campaign_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_campaign_with_http_info(**kwargs) # noqa: E501
return data
def create_campaign_with_http_info(self, **kwargs): # noqa: E501
"""Create New Campaign # noqa: E501
Creates a new campaign. The created campaign is a draft and is not automatically sent. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_campaign_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id:
:param str template_id:
:param str from_email:
:param str from_name:
:param str subject:
:param str name:
:param bool use_smart_sending:
:param bool add_google_analytics:
:return: Campaign
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'template_id', 'from_email', 'from_name', 'subject', 'name', 'use_smart_sending', 'add_google_analytics'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_campaign" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'list_id' in params:
form_params.append(('list_id', params['list_id'])) # noqa: E501
if 'template_id' in params:
form_params.append(('template_id', params['template_id'])) # noqa: E501
if 'from_email' in params:
form_params.append(('from_email', params['from_email'])) # noqa: E501
if 'from_name' in params:
form_params.append(('from_name', params['from_name'])) # noqa: E501
if 'subject' in params:
form_params.append(('subject', params['subject'])) # noqa: E501
if 'name' in params:
form_params.append(('name', params['name'])) # noqa: E501
if 'use_smart_sending' in params:
form_params.append(('use_smart_sending', params['use_smart_sending'])) # noqa: E501
if 'add_google_analytics' in params:
form_params.append(('add_google_analytics', params['add_google_analytics'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaigns', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Campaign', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_campaign_info(self, campaign_id, **kwargs): # noqa: E501
"""Get Campaign Info # noqa: E501
Returns summary information for the campaign specified. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_campaign_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:return: Campaign
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_campaign_info_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.get_campaign_info_with_http_info(campaign_id, **kwargs) # noqa: E501
return data
def get_campaign_info_with_http_info(self, campaign_id, **kwargs): # noqa: E501
"""Get Campaign Info # noqa: E501
Returns summary information for the campaign specified. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_campaign_info_with_http_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:return: Campaign
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_campaign_info" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `get_campaign_info`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaign/{campaign_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Campaign', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_campaign_recipients(self, campaign_id, **kwargs): # noqa: E501
"""Get Campaign Recipients # noqa: E501
Returns summary information about email recipients for the campaign specified that includes each recipients email, customer ID, and status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_campaign_recipients(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:param int count: For pagination, the number of results to return. Max = 25,000
:param str sort: Sort order to apply to results, either ascending or descending. Valid values are `asc` or `desc`. Defaults to `asc`.
:param str offset: For pagination, if a response to this endpoint includes a `next_offset`, use that value to get the next page of recipients.
:return: InlineResponse20011
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_campaign_recipients_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.get_campaign_recipients_with_http_info(campaign_id, **kwargs) # noqa: E501
return data
def get_campaign_recipients_with_http_info(self, campaign_id, **kwargs): # noqa: E501
"""Get Campaign Recipients # noqa: E501
Returns summary information about email recipients for the campaign specified that includes each recipients email, customer ID, and status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_campaign_recipients_with_http_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:param int count: For pagination, the number of results to return. Max = 25,000
:param str sort: Sort order to apply to results, either ascending or descending. Valid values are `asc` or `desc`. Defaults to `asc`.
:param str offset: For pagination, if a response to this endpoint includes a `next_offset`, use that value to get the next page of recipients.
:return: InlineResponse20011
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id', 'count', 'sort', 'offset'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_campaign_recipients" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `get_campaign_recipients`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaign/{campaign_id}/recipients', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20011', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_campaigns(self, **kwargs): # noqa: E501
"""Get Campaigns # noqa: E501
Returns a list of all the campaigns you've created. The campaigns are returned in reverse sorted order by the time they were created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_campaigns(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: For pagination, which page of results to return. Default = 0
:param int count: For pagination, the number of results to return. Max = 100
:return: InlineResponse2009
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_campaigns_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_campaigns_with_http_info(**kwargs) # noqa: E501
return data
def get_campaigns_with_http_info(self, **kwargs): # noqa: E501
"""Get Campaigns # noqa: E501
Returns a list of all the campaigns you've created. The campaigns are returned in reverse sorted order by the time they were created. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_campaigns_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: For pagination, which page of results to return. Default = 0
:param int count: For pagination, the number of results to return. Max = 100
:return: InlineResponse2009
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'count'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_campaigns" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaigns', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2009', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def schedule_campaign(self, campaign_id, **kwargs): # noqa: E501
"""Schedule a Campaign # noqa: E501
Schedules a campaign for a time in the future # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.schedule_campaign(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:param str send_time:
:return: InlineResponse20010
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.schedule_campaign_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.schedule_campaign_with_http_info(campaign_id, **kwargs) # noqa: E501
return data
def schedule_campaign_with_http_info(self, campaign_id, **kwargs): # noqa: E501
"""Schedule a Campaign # noqa: E501
Schedules a campaign for a time in the future # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.schedule_campaign_with_http_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:param str send_time:
:return: InlineResponse20010
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id', 'send_time'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method schedule_campaign" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `schedule_campaign`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'send_time' in params:
form_params.append(('send_time', params['send_time'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaign/{campaign_id}/schedule', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20010', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def send_campaign(self, campaign_id, **kwargs): # noqa: E501
"""Send a Campaign Immediately # noqa: E501
Queues a campaign for immediate delivery # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.send_campaign(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:return: InlineResponse20010
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.send_campaign_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.send_campaign_with_http_info(campaign_id, **kwargs) # noqa: E501
return data
def send_campaign_with_http_info(self, campaign_id, **kwargs): # noqa: E501
"""Send a Campaign Immediately # noqa: E501
Queues a campaign for immediate delivery # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.send_campaign_with_http_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:return: InlineResponse20010
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method send_campaign" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `send_campaign`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaign/{campaign_id}/send', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse20010', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_campaign(self, campaign_id, **kwargs): # noqa: E501
"""Update Campaign # noqa: E501
Updates details of a campaign. You can update a campaign's name, subject, from email address, from name, template or list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_campaign(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:param str list_id:
:param str template_id:
:param str from_email:
:param str from_name:
:param str subject:
:param str name:
:param bool use_smart_sending:
:param bool add_google_analytics:
:return: Campaign
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_campaign_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.update_campaign_with_http_info(campaign_id, **kwargs) # noqa: E501
return data
def update_campaign_with_http_info(self, campaign_id, **kwargs): # noqa: E501
"""Update Campaign # noqa: E501
Updates details of a campaign. You can update a campaign's name, subject, from email address, from name, template or list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_campaign_with_http_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: (required)
:param str list_id:
:param str template_id:
:param str from_email:
:param str from_name:
:param str subject:
:param str name:
:param bool use_smart_sending:
:param bool add_google_analytics:
:return: Campaign
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id', 'list_id', 'template_id', 'from_email', 'from_name', 'subject', 'name', 'use_smart_sending', 'add_google_analytics'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_campaign" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `update_campaign`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'list_id' in params:
form_params.append(('list_id', params['list_id'])) # noqa: E501
if 'template_id' in params:
form_params.append(('template_id', params['template_id'])) # noqa: E501
if 'from_email' in params:
form_params.append(('from_email', params['from_email'])) # noqa: E501
if 'from_name' in params:
form_params.append(('from_name', params['from_name'])) # noqa: E501
if 'subject' in params:
form_params.append(('subject', params['subject'])) # noqa: E501
if 'name' in params:
form_params.append(('name', params['name'])) # noqa: E501
if 'use_smart_sending' in params:
form_params.append(('use_smart_sending', params['use_smart_sending'])) # noqa: E501
if 'add_google_analytics' in params:
form_params.append(('add_google_analytics', params['add_google_analytics'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKeyAuth'] # noqa: E501
return self.api_client.call_api(
'/v1/campaign/{campaign_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Campaign', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 39.755976
| 167
| 0.612051
| 4,655
| 39,915
| 5.003652
| 0.052846
| 0.053237
| 0.025416
| 0.027821
| 0.965009
| 0.957925
| 0.957324
| 0.951185
| 0.950841
| 0.942126
| 0
| 0.020041
| 0.296179
| 39,915
| 1,003
| 168
| 39.795613
| 0.809063
| 0.341676
| 0
| 0.816794
| 1
| 0
| 0.200765
| 0.041351
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03626
| false
| 0
| 0.007634
| 0
| 0.097328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
10111cec7d8b32fe519014912e972988c397b245
| 11,219
|
py
|
Python
|
ini_my_model.py
|
samxu0823/anfis-pytorch
|
b4ec3f0e8259963800e9e0a2904a580d1e56cc1c
|
[
"MIT"
] | null | null | null |
ini_my_model.py
|
samxu0823/anfis-pytorch
|
b4ec3f0e8259963800e9e0a2904a580d1e56cc1c
|
[
"MIT"
] | null | null | null |
ini_my_model.py
|
samxu0823/anfis-pytorch
|
b4ec3f0e8259963800e9e0a2904a580d1e56cc1c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# @ -*- coding: utf-8 -*-
# @Time: 2021/6/25 21:39
# @Author: Wei XU <samxu0823@gmail.com>
import numpy as np
import anfis
from membership import BellMembFunc, make_bell_mfs, make_tri_mfs, make_gauss_mfs
def my_model_m1(rules='less'):
    """Build the ANFIS regressor for m1 prediction (single output).

    Fuzzy reasoning and rules can be modified according to human expertise.

    :param rules: 'less' -> one rule for each case,
                  'more' -> three rules for each case
    :return: initialized ANFIS model
    """
    if rules == 'less':
        # Number of Gaussian MFs per input x0..x9; each MF has sigma = 0.5
        # with centres spread evenly over [0, 1].
        mf_counts = (5, 5, 3, 7, 5, 3, 5, 7, 5, 5)
        invardefs = [(f'x{i}', make_gauss_mfs(0.5, np.linspace(0, 1, n)))
                     for i, n in enumerate(mf_counts)]
        # One rule per case: each row picks an MF index per input.
        rules = [[0, 2, 2, 6, 0, 2, 0, 6, 0, 0], [3, 3, 0, 2, 3, 0, 3, 2, 3, 3],
                 [2, 4, 1, 1, 2, 1, 2, 1, 2, 2], [1, 0, 1, 5, 1, 2, 1, 5, 1, 1],
                 [0, 3, 2, 4, 0, 2, 0, 4, 0, 0], [4, 2, 0, 0, 4, 0, 4, 0, 4, 4],
                 [1, 0, 1, 2, 1, 2, 1, 2, 1, 1], [2, 1, 1, 3, 2, 1, 2, 3, 2, 2]]
    elif rules == 'more':
        # All ten inputs share the same fuzzification here: 10 Gaussian MFs,
        # sigma = 0.4, centres evenly spaced over [0, 1].
        invardefs = [(f'x{i}', make_gauss_mfs(0.4, np.linspace(0, 1, 10)))
                     for i in range(10)]
        rules = [[1, 8, 5, 2, 7, 4, 6, 2, 7, 7], [0, 6, 4, 1, 6, 4, 8, 1, 6, 6],
                 [1, 6, 4, 2, 5, 4, 8, 1, 5, 5], [1, 7, 5, 2, 6, 4, 7, 1, 6, 6],
                 [1, 7, 5, 2, 6, 4, 7, 2, 6, 6], [0, 6, 4, 1, 5, 4, 8, 1, 5, 5],
                 [1, 6, 4, 2, 6, 4, 7, 1, 6, 6], [1, 7, 4, 2, 6, 4, 7, 1, 6, 6],
                 [0, 5, 1, 1, 4, 1, 0, 1, 4, 4], [0, 5, 0, 0, 4, 0, 5, 0, 4, 4],
                 [1, 3, 0, 1, 3, 0, 5, 1, 3, 3], [0, 3, 1, 0, 2, 1, 1, 0, 2, 2],
                 [0, 2, 1, 1, 2, 0, 3, 1, 2, 2], [0, 0, 0, 0, 0, 0, 5, 0, 0, 0],
                 [0, 1, 0, 0, 1, 0, 0, 0, 1, 1], [1, 6, 0, 1, 5, 0, 5, 1, 5, 5],
                 [6, 9, 9, 7, 9, 9, 9, 7, 9, 9], [1, 8, 9, 3, 7, 7, 9, 2, 7, 7],
                 [1, 8, 9, 2, 7, 8, 9, 2, 7, 7], [4, 9, 9, 6, 9, 8, 9, 5, 9, 9],
                 [4, 9, 9, 5, 9, 7, 9, 5, 9, 9], [1, 8, 9, 3, 8, 8, 9, 2, 8, 8],
                 [9, 9, 9, 9, 9, 8, 9, 9, 9, 9], [1, 7, 8, 2, 7, 8, 9, 2, 7, 7]]
    model = anfis.AnfisNet('My_Anfis', invardefs, ['m1'], grid=False)
    model.set_rules(rules)
    return model
def my_model_k1():
    """Build the ANFIS regressor for k1 prediction (single output).

    Fuzzy reasoning and rules can be modified according to human expertise.

    :return: initialized ANFIS model
    """
    # Number of Gaussian MFs per input x0..x9; each MF has sigma = 0.5 with
    # centres spread evenly over [0, 1].
    mf_counts = (2, 4, 3, 3, 6, 3, 6, 5, 5, 7)
    invardefs = [(f'x{i}', make_gauss_mfs(0.5, np.linspace(0, 1, n)))
                 for i, n in enumerate(mf_counts)]
    model = anfis.AnfisNet('My_Anfis', invardefs, ['k1'], grid=False)
    # One rule per case: each row picks an MF index per input.
    model.set_rules([
        [1, 1, 2, 2, 4, 1, 4, 0, 1, 6], [0, 0, 0, 0, 0, 0, 0, 3, 4, 2],
        [1, 3, 2, 1, 5, 2, 5, 2, 0, 0], [0, 0, 1, 1, 1, 0, 1, 1, 3, 1],
        [1, 3, 2, 2, 5, 2, 5, 0, 0, 3], [0, 2, 0, 0, 2, 1, 2, 4, 3, 4],
        [0, 2, 1, 1, 3, 1, 3, 1, 2, 4], [1, 1, 2, 1, 4, 1, 4, 2, 1, 5]])
    return model
def my_model_c1(rules='less'):
    """
    Initialization of the ANFIS regressor for c1 prediction. Single output.

    Fuzzy reasoning and rules can be modified according to human-expertise.

    :param rules: one rule for each case ('less') or three rules for each case ('more')
    :return:
        model: Initialized ANFIS
    :raises ValueError: if ``rules`` is neither 'less' nor 'more'. Previously
        an unknown value fell through both branches and crashed later with an
        UnboundLocalError on ``invardefs``.
    """
    if rules == 'less':
        invardefs = [
            ('x0', make_gauss_mfs(0.5, np.linspace(0, 1, 3))),
            ('x1', make_gauss_mfs(0.5, np.linspace(0, 1, 7))),
            ('x2', make_gauss_mfs(0.5, np.linspace(0, 1, 4))),
            ('x3', make_gauss_mfs(0.5, np.linspace(0, 1, 7))),
            ('x4', make_gauss_mfs(0.5, np.linspace(0, 1, 6))),
            ('x5', make_gauss_mfs(0.5, np.linspace(0, 1, 6))),
            ('x6', make_gauss_mfs(0.5, np.linspace(0, 1, 7))),
            ('x7', make_gauss_mfs(0.5, np.linspace(0, 1, 5))),
            ('x8', make_gauss_mfs(0.5, np.linspace(0, 1, 7))),
            ('x9', make_gauss_mfs(0.5, np.linspace(0, 1, 7))),
        ]
        # 14.07
        rules = [[1, 0, 0, 6, 5, 1, 6, 0, 0, 6], [0, 4, 0, 2, 2, 0, 2, 0, 3, 2],
                 [2, 5, 3, 1, 1, 5, 1, 4, 5, 1], [0, 1, 0, 5, 4, 0, 5, 0, 1, 5],
                 [2, 2, 2, 4, 0, 4, 4, 3, 3, 4], [1, 6, 1, 0, 2, 3, 0, 1, 6, 0],
                 [1, 4, 1, 2, 3, 3, 2, 2, 4, 2], [1, 3, 0, 3, 4, 2, 3, 0, 2, 3]]
    elif rules == 'more':
        invardefs = [
            ('x0', make_gauss_mfs(0.5, np.linspace(0, 1, 5))),
            ('x1', make_gauss_mfs(0.5, np.linspace(0, 1, 6))),
            ('x2', make_gauss_mfs(0.5, np.linspace(0, 1, 10))),
            ('x3', make_gauss_mfs(0.5, np.linspace(0, 1, 5))),
            ('x4', make_gauss_mfs(0.5, np.linspace(0, 1, 8))),
            ('x5', make_gauss_mfs(0.5, np.linspace(0, 1, 5))),
            ('x6', make_gauss_mfs(0.5, np.linspace(0, 1, 8))),
            ('x7', make_gauss_mfs(0.5, np.linspace(0, 1, 9))),
            ('x8', make_gauss_mfs(0.5, np.linspace(0, 1, 5))),
            ('x9', make_gauss_mfs(0.5, np.linspace(0, 1, 11))),
        ]
        rules = [[4, 1, 6, 4, 0, 4, 0, 1, 4, 4], [3, 0, 8, 4, 3, 4, 3, 4, 4, 7],
                 [4, 5, 8, 4, 5, 4, 5, 6, 4, 8], [3, 0, 7, 4, 2, 4, 2, 3, 4, 6],
                 [4, 4, 8, 4, 4, 4, 4, 5, 4, 7], [4, 3, 9, 4, 7, 4, 7, 8, 4, 10],
                 [4, 3, 9, 4, 6, 4, 6, 7, 4, 9], [4, 2, 6, 4, 1, 4, 1, 2, 4, 5],
                 [0, 2, 5, 0, 0, 0, 1, 0, 0, 0], [4, 2, 1, 4, 0, 4, 1, 1, 4, 3],
                 [2, 2, 4, 2, 0, 2, 1, 1, 2, 2], [0, 2, 2, 0, 0, 0, 1, 1, 0, 2],
                 [0, 2, 5, 0, 0, 0, 1, 0, 0, 1], [3, 2, 0, 3, 0, 3, 1, 1, 3, 3],
                 [0, 2, 3, 0, 0, 0, 1, 0, 0, 2], [1, 2, 4, 1, 0, 1, 1, 1, 1, 2]]
    else:
        # Fail fast with a clear message instead of an UnboundLocalError below.
        raise ValueError("rules must be 'less' or 'more', got %r" % (rules,))
    outvars = ['c1']
    model = anfis.AnfisNet('My_Anfis', invardefs, outvars, grid=False)
    model.set_rules(rules)
    return model
def my_model_class1():
    """
    Initialization of the ANFIS classifier for virtual generic model. Multi-output.

    Fuzzy reasoning and rules can be modified according to human-expertise.

    :return:
        model: Initialized ANFIS
    """
    # Number of Gaussian membership functions for each input x0..x9.
    mf_counts = [5, 4, 3, 3, 7, 3, 5, 5, 2, 7]
    invardefs = [('x%d' % i, make_gauss_mfs(0.5, np.linspace(0, 1, n)))
                 for i, n in enumerate(mf_counts)]
    outvars = ['dc1', 'dc2', 'dc3', 'dc4', 'dc5', 'dc6', 'dc7', 'dc8']
    model = anfis.AnfisNet('My_Anfis', invardefs, outvars, hybrid=False, grid=False)
    # 20.07
    rules = [[0, 0, 1, 2, 6, 0, 0, 0, 1, 3], [3, 0, 0, 0, 2, 0, 3, 2, 0, 0],
             [2, 3, 2, 1, 1, 2, 2, 2, 1, 4], [1, 0, 0, 1, 5, 0, 1, 1, 0, 2],
             [0, 2, 2, 2, 4, 1, 0, 0, 1, 1], [4, 1, 1, 0, 0, 0, 4, 2, 0, 5],
             [1, 1, 1, 1, 2, 0, 1, 4, 0, 6], [2, 0, 1, 1, 3, 0, 2, 3, 1, 3]]
    model.set_rules(rules, hybrid=False)
    return model
def classifier_rig(window="small"):
    """
    Initialization of the ANFIS classifier for test rig. Multi-output.

    Fuzzy reasoning and rules can be modified according to human-expertise.

    :param window: "small", or "large" window size (any value other than
        "small" selects the large-window configuration)
    :return:
        model: Initialized ANFIS
    """
    if window == "small":
        # Membership-function counts per input x0..x9 for the small window.
        mf_counts = [2, 6, 4, 3, 3, 3, 2, 3, 2, 3]
        # 22.07 small
        rules = [[1, 0, 0, 2, 2, 0, 0, 2, 0, 2], [0, 2, 2, 1, 0, 1, 1, 1, 1, 1],
                 [0, 5, 1, 0, 1, 2, 1, 0, 1, 0], [0, 1, 2, 0, 1, 2, 1, 0, 1, 0],
                 [0, 4, 3, 1, 0, 1, 1, 1, 1, 1], [0, 3, 3, 1, 0, 1, 1, 1, 1, 1]]
    else:
        # Membership-function counts per input x0..x9 for the large window.
        mf_counts = [3, 3, 4, 4, 4, 4, 2, 3, 4, 3]
        # 22.07 Large
        rules = [[2, 0, 0, 3, 0, 0, 0, 2, 0, 2], [0, 2, 3, 0, 1, 1, 1, 1, 3, 1],
                 [1, 1, 1, 2, 3, 2, 1, 0, 1, 0], [1, 2, 2, 0, 2, 3, 1, 0, 2, 0],
                 [0, 2, 3, 1, 1, 1, 1, 1, 3, 1]]
    invardefs = [('x%d' % i, make_gauss_mfs(0.5, np.linspace(0, 1, n)))
                 for i, n in enumerate(mf_counts)]
    outvars = ['nc', 'dc1', 'dc2', 'dc3', 'dc4', 'dc5', 'dc6']
    model = anfis.AnfisNet('My_Anfis', invardefs, outvars, hybrid=False, grid=False)
    model.set_rules(rules, hybrid=False)
    return model
| 48.357759
| 84
| 0.465282
| 2,084
| 11,219
| 2.416027
| 0.059021
| 0.047666
| 0.193049
| 0.206554
| 0.842105
| 0.807547
| 0.752334
| 0.73863
| 0.694935
| 0.662761
| 0
| 0.177698
| 0.307781
| 11,219
| 231
| 85
| 48.5671
| 0.470641
| 0.110972
| 0
| 0.434524
| 0
| 0
| 0.028924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029762
| false
| 0
| 0.017857
| 0
| 0.077381
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1264b004db744b0677678461e3779c59c91fdeb2
| 20,388
|
py
|
Python
|
src/python_pachyderm/proto/admin/v1_10/auth/auth_pb2_grpc.py
|
barretthinson/python-pachyderm
|
82cea22d1105d70833a5522ccac750ca521694ff
|
[
"Apache-2.0"
] | null | null | null |
src/python_pachyderm/proto/admin/v1_10/auth/auth_pb2_grpc.py
|
barretthinson/python-pachyderm
|
82cea22d1105d70833a5522ccac750ca521694ff
|
[
"Apache-2.0"
] | null | null | null |
src/python_pachyderm/proto/admin/v1_10/auth/auth_pb2_grpc.py
|
barretthinson/python-pachyderm
|
82cea22d1105d70833a5522ccac750ca521694ff
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from python_pachyderm.proto.admin.v1_10.auth import auth_pb2 as client_dot_admin_dot_v1__10_dot_auth_dot_auth__pb2
class APIStub(object):
    """Client-side stub for the auth_1_10.API gRPC service.

    (Associated documentation comment is missing in the .proto file.)
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        pb2 = client_dot_admin_dot_v1__10_dot_auth_dot_auth__pb2
        # Every RPC of this service is unary-unary and its request/response
        # message classes follow the '<Method>Request'/'<Method>Response'
        # naming convention, so the stubs can be wired up uniformly.
        for method in ('Activate', 'Deactivate', 'GetConfiguration',
                       'SetConfiguration', 'GetAdmins', 'ModifyAdmins',
                       'Authenticate', 'Authorize', 'WhoAmI', 'GetScope',
                       'SetScope', 'GetACL', 'SetACL', 'GetAuthToken',
                       'ExtendAuthToken', 'RevokeAuthToken', 'SetGroupsForUser',
                       'ModifyMembers', 'GetGroups', 'GetUsers',
                       'GetOneTimePassword'):
            setattr(self, method, channel.unary_unary(
                '/auth_1_10.API/' + method,
                request_serializer=getattr(
                    pb2, method + 'Request').SerializeToString,
                response_deserializer=getattr(
                    pb2, method + 'Response').FromString,
            ))
class APIServicer(object):
    """Server-side service interface for the auth_1_10.API gRPC service.

    Every handler defaults to reporting UNIMPLEMENTED; subclasses override
    the methods they actually implement.
    (Associated documentation comment is missing in the .proto file.)
    """

    def _unimplemented(self, context):
        # Shared default behavior for all handlers that have not been
        # overridden by a concrete servicer implementation.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Activate(self, request, context):
        """Activate/Deactivate the auth API. 'Activate' sets an initial set of admins
        for the Pachyderm cluster, and 'Deactivate' removes all ACLs, tokens, and
        admins from the Pachyderm cluster, making all data publicly accessable
        """
        self._unimplemented(context)

    def Deactivate(self, request, context):
        self._unimplemented(context)

    def GetConfiguration(self, request, context):
        self._unimplemented(context)

    def SetConfiguration(self, request, context):
        self._unimplemented(context)

    def GetAdmins(self, request, context):
        """GetAdmins returns the current list of cluster admins
        """
        self._unimplemented(context)

    def ModifyAdmins(self, request, context):
        """ModifyAdmins adds or removes admins from the cluster
        """
        self._unimplemented(context)

    def Authenticate(self, request, context):
        self._unimplemented(context)

    def Authorize(self, request, context):
        self._unimplemented(context)

    def WhoAmI(self, request, context):
        self._unimplemented(context)

    def GetScope(self, request, context):
        self._unimplemented(context)

    def SetScope(self, request, context):
        self._unimplemented(context)

    def GetACL(self, request, context):
        self._unimplemented(context)

    def SetACL(self, request, context):
        self._unimplemented(context)

    def GetAuthToken(self, request, context):
        self._unimplemented(context)

    def ExtendAuthToken(self, request, context):
        self._unimplemented(context)

    def RevokeAuthToken(self, request, context):
        self._unimplemented(context)

    def SetGroupsForUser(self, request, context):
        self._unimplemented(context)

    def ModifyMembers(self, request, context):
        self._unimplemented(context)

    def GetGroups(self, request, context):
        self._unimplemented(context)

    def GetUsers(self, request, context):
        self._unimplemented(context)

    def GetOneTimePassword(self, request, context):
        self._unimplemented(context)
def add_APIServicer_to_server(servicer, server):
    """Register all auth_1_10.API handlers from *servicer* on *server*."""
    pb2 = client_dot_admin_dot_v1__10_dot_auth_dot_auth__pb2
    method_names = (
        'Activate', 'Deactivate', 'GetConfiguration', 'SetConfiguration',
        'GetAdmins', 'ModifyAdmins', 'Authenticate', 'Authorize', 'WhoAmI',
        'GetScope', 'SetScope', 'GetACL', 'SetACL', 'GetAuthToken',
        'ExtendAuthToken', 'RevokeAuthToken', 'SetGroupsForUser',
        'ModifyMembers', 'GetGroups', 'GetUsers', 'GetOneTimePassword',
    )
    # Every RPC is unary-unary and its message classes follow the
    # '<Method>Request'/'<Method>Response' naming convention in the pb2
    # module, so the handler table can be built uniformly.
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=getattr(pb2, name + 'Request').FromString,
            response_serializer=getattr(
                pb2, name + 'Response').SerializeToString,
        )
        for name in method_names
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'auth_1_10.API', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 52.411311
| 126
| 0.789288
| 2,413
| 20,388
| 6.123083
| 0.062992
| 0.080541
| 0.080541
| 0.0978
| 0.831201
| 0.831201
| 0.831201
| 0.738816
| 0.738816
| 0.738816
| 0
| 0.023555
| 0.146263
| 20,388
| 388
| 127
| 52.546392
| 0.82529
| 0.077055
| 0
| 0.32
| 1
| 0
| 0.09376
| 0.029173
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070769
| false
| 0.089231
| 0.006154
| 0
| 0.083077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
89e5574287500ff2d661d7eab1ca771e893b855e
| 1,481
|
py
|
Python
|
snmp/snmptest.py
|
stanislavb/snmp-lldp
|
66d1f418a05de3b3d74685a069b5a84035178b38
|
[
"Unlicense"
] | 25
|
2015-02-10T18:38:33.000Z
|
2021-07-31T11:22:37.000Z
|
snmp/snmptest.py
|
tsingliu1007/snmp-lldp
|
66d1f418a05de3b3d74685a069b5a84035178b38
|
[
"Unlicense"
] | 1
|
2015-07-03T18:01:29.000Z
|
2015-07-03T18:01:29.000Z
|
snmp/snmptest.py
|
tsingliu1007/snmp-lldp
|
66d1f418a05de3b3d74685a069b5a84035178b38
|
[
"Unlicense"
] | 7
|
2017-05-25T07:02:28.000Z
|
2020-10-05T07:36:55.000Z
|
#!/usr/bin/env python
# Manual smoke test for the local ``snmp`` module (Python 2: uses the
# ``print`` statement). Each query prints the returned value, then
# ``not value``, so an empty/falsy response shows up as ``True``.
import snmp
# Connection to an SNMP agent on this host; community/credentials are
# presumably the module's defaults -- confirm against snmp.Connection.
c = snmp.Connection("localhost")
# Well-formed OIDs: the standard SNMPv2 system subtree (.1.3.6.1.2.1.1.x.0).
l = [".1.3.6.1.2.1.1.1.0",
".1.3.6.1.2.1.1.2.0",
".1.3.6.1.2.1.1.3.0",
".1.3.6.1.2.1.1.4.0",
".1.3.6.1.2.1.1.5.0",
".1.3.6.1.2.1.1.6.0",
".1.3.6.1.2.1.1.7.0",
".1.3.6.1.2.1.1.8.0"]
# Malformed/partial OIDs (trailing dot, missing leaf, letters, no leading
# dot, arbitrary text, empty string) to exercise input validation.
lmixed = [".1.3.6.1.2.1.1.1.0",
".1.3.6.1.2.1.1.2.",
".1.3.6.1.2.1.1.3",
".1.3.6.1.2.1.1.",
".1.3.6.1.2.1.1",
".1.3.6.1.2.1.a.b.c.d",
"1.3.6.1.2.1.1.7.0",
"test",
""]
# The same system OIDs keyed by their conventional MIB object names.
d = {"sysDescr": ".1.3.6.1.2.1.1.1.0",
"sysObjectID": ".1.3.6.1.2.1.1.2.0",
"sysUpTime": ".1.3.6.1.2.1.1.3.0",
"sysContact": ".1.3.6.1.2.1.1.4.0",
"sysName": ".1.3.6.1.2.1.1.5.0",
"sysLocation": ".1.3.6.1.2.1.1.6.0",
"sysServices": ".1.3.6.1.2.1.1.7.0",
"sysORLastChange": ".1.3.6.1.2.1.1.8.0"}
# Name-keyed mix of one valid OID and several malformed ones.
dmixed = {"sysDescr": ".1.3.6.1.2.1.1.1.0",
"sysObjectID": ".1.3.6.1.2.1.1.2.",
"sysUpTime": ".1.3.6.1.2.1.1.3",
"sysContact": ".1.3.6.1.2.1.1.",
"sysName": ".1.3.6.1.2.1.1",
"sysLocation": ".1.3.6.1.2.1.a.b.c.d",
"sysServices": "1.3.6.1.2.1.1.7.0",
"sysORLastChange": "test",
"nonetest": ""}
# Legit input
r = c.get(".1.3.6.1.2.1.1.1.0")  # single GET of sysDescr
print r
print not r
r = c.walk(".1.3.6.1.2.1.1.1")  # WALK of the sysDescr subtree
print r
print not r
r = c.populateDict(d)  # NOTE(review): presumably fills dict values -- confirm
print r
print not r
r = c.populateList(l)  # NOTE(review): presumably fetches each OID in the list
print r
print not r
r = c.dictGet(d)  # GET for every OID in the name-keyed dict
print r
print not r
| 23.140625
| 48
| 0.442944
| 355
| 1,481
| 1.847887
| 0.123944
| 0.115854
| 0.150915
| 0.195122
| 0.801829
| 0.801829
| 0.775915
| 0.63872
| 0.541159
| 0.320122
| 0
| 0.237633
| 0.235652
| 1,481
| 63
| 49
| 23.507937
| 0.341873
| 0.021607
| 0
| 0.196078
| 0
| 0
| 0.514858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.019608
| null | null | 0.196078
| 0
| 0
| 1
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c3cef5eba4ae53d0691e2bdc8ec5e704f6415baa
| 28,123
|
py
|
Python
|
sdk/python/pulumiverse_unifi/user.py
|
pulumiverse/pulumi-unifi
|
e22e1bef9b409c71ad578b5d9e39284a26da355c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-29T18:49:28.000Z
|
2022-03-29T18:49:28.000Z
|
sdk/python/pulumiverse_unifi/user.py
|
pulumiverse/pulumi-unifi
|
e22e1bef9b409c71ad578b5d9e39284a26da355c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumiverse_unifi/user.py
|
pulumiverse/pulumi-unifi
|
e22e1bef9b409c71ad578b5d9e39284a26da355c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
mac: pulumi.Input[str],
allow_existing: Optional[pulumi.Input[bool]] = None,
blocked: Optional[pulumi.Input[bool]] = None,
dev_id_override: Optional[pulumi.Input[int]] = None,
fixed_ip: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_id: Optional[pulumi.Input[str]] = None,
note: Optional[pulumi.Input[str]] = None,
site: Optional[pulumi.Input[str]] = None,
skip_forget_on_destroy: Optional[pulumi.Input[bool]] = None,
user_group_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[str] mac: The MAC address of the user.
:param pulumi.Input[bool] allow_existing: Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
:param pulumi.Input[bool] blocked: Specifies whether this user should be blocked from the network.
:param pulumi.Input[int] dev_id_override: Override the device fingerprint.
:param pulumi.Input[str] fixed_ip: A fixed IPv4 address for this user.
:param pulumi.Input[str] name: The name of the user.
:param pulumi.Input[str] network_id: The network ID for this user.
:param pulumi.Input[str] note: A note with additional information for the user.
:param pulumi.Input[str] site: The name of the site to associate the user with.
:param pulumi.Input[bool] skip_forget_on_destroy: Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
:param pulumi.Input[str] user_group_id: The user group ID for the user.
"""
pulumi.set(__self__, "mac", mac)
if allow_existing is not None:
pulumi.set(__self__, "allow_existing", allow_existing)
if blocked is not None:
pulumi.set(__self__, "blocked", blocked)
if dev_id_override is not None:
pulumi.set(__self__, "dev_id_override", dev_id_override)
if fixed_ip is not None:
pulumi.set(__self__, "fixed_ip", fixed_ip)
if name is not None:
pulumi.set(__self__, "name", name)
if network_id is not None:
pulumi.set(__self__, "network_id", network_id)
if note is not None:
pulumi.set(__self__, "note", note)
if site is not None:
pulumi.set(__self__, "site", site)
if skip_forget_on_destroy is not None:
pulumi.set(__self__, "skip_forget_on_destroy", skip_forget_on_destroy)
if user_group_id is not None:
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter
def mac(self) -> pulumi.Input[str]:
"""
The MAC address of the user.
"""
return pulumi.get(self, "mac")
@mac.setter
def mac(self, value: pulumi.Input[str]):
pulumi.set(self, "mac", value)
@property
@pulumi.getter(name="allowExisting")
def allow_existing(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
"""
return pulumi.get(self, "allow_existing")
@allow_existing.setter
def allow_existing(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_existing", value)
@property
@pulumi.getter
def blocked(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this user should be blocked from the network.
"""
return pulumi.get(self, "blocked")
@blocked.setter
def blocked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "blocked", value)
@property
@pulumi.getter(name="devIdOverride")
def dev_id_override(self) -> Optional[pulumi.Input[int]]:
    """
    Override the device fingerprint.
    """
    return pulumi.get(self, "dev_id_override")

@dev_id_override.setter
def dev_id_override(self, value: Optional[pulumi.Input[int]]):
    # Integer fingerprint id; wire name is "devIdOverride".
    pulumi.set(self, "dev_id_override", value)
@property
@pulumi.getter(name="fixedIp")
def fixed_ip(self) -> Optional[pulumi.Input[str]]:
    """
    A fixed IPv4 address for this user.
    """
    return pulumi.get(self, "fixed_ip")

@fixed_ip.setter
def fixed_ip(self, value: Optional[pulumi.Input[str]]):
    # IPv4 string; wire name is "fixedIp".
    pulumi.set(self, "fixed_ip", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the user.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    # Display name shown in the controller UI.
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkId")
def network_id(self) -> Optional[pulumi.Input[str]]:
    """
    The network ID for this user.
    """
    return pulumi.get(self, "network_id")

@network_id.setter
def network_id(self, value: Optional[pulumi.Input[str]]):
    # Opaque controller-assigned id; wire name is "networkId".
    pulumi.set(self, "network_id", value)
@property
@pulumi.getter
def note(self) -> Optional[pulumi.Input[str]]:
    """
    A note with additional information for the user.
    """
    return pulumi.get(self, "note")

@note.setter
def note(self, value: Optional[pulumi.Input[str]]):
    # Free-form annotation; no validation performed here.
    pulumi.set(self, "note", value)
@property
@pulumi.getter
def site(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the site to associate the user with.
    """
    return pulumi.get(self, "site")

@site.setter
def site(self, value: Optional[pulumi.Input[str]]):
    # Optional; presumably the provider default site applies when unset — see provider docs.
    pulumi.set(self, "site", value)
@property
@pulumi.getter(name="skipForgetOnDestroy")
def skip_forget_on_destroy(self) -> Optional[pulumi.Input[bool]]:
    """
    Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
    """
    return pulumi.get(self, "skip_forget_on_destroy")

@skip_forget_on_destroy.setter
def skip_forget_on_destroy(self, value: Optional[pulumi.Input[bool]]):
    # Wire name is "skipForgetOnDestroy".
    pulumi.set(self, "skip_forget_on_destroy", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> Optional[pulumi.Input[str]]:
    """
    The user group ID for the user.
    """
    return pulumi.get(self, "user_group_id")

@user_group_id.setter
def user_group_id(self, value: Optional[pulumi.Input[str]]):
    # Opaque controller-assigned group id; wire name is "userGroupId".
    pulumi.set(self, "user_group_id", value)
@pulumi.input_type
class _UserState:
    """State container used when importing/refreshing an existing ``User``.

    Unlike the args class, every field (including ``mac``) is optional, and the
    read-only attributes ``hostname`` and ``ip`` are present.
    """
    def __init__(__self__, *,
                 allow_existing: Optional[pulumi.Input[bool]] = None,
                 blocked: Optional[pulumi.Input[bool]] = None,
                 dev_id_override: Optional[pulumi.Input[int]] = None,
                 fixed_ip: Optional[pulumi.Input[str]] = None,
                 hostname: Optional[pulumi.Input[str]] = None,
                 ip: Optional[pulumi.Input[str]] = None,
                 mac: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_id: Optional[pulumi.Input[str]] = None,
                 note: Optional[pulumi.Input[str]] = None,
                 site: Optional[pulumi.Input[str]] = None,
                 skip_forget_on_destroy: Optional[pulumi.Input[bool]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering User resources.
        :param pulumi.Input[bool] allow_existing: Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
        :param pulumi.Input[bool] blocked: Specifies whether this user should be blocked from the network.
        :param pulumi.Input[int] dev_id_override: Override the device fingerprint.
        :param pulumi.Input[str] fixed_ip: A fixed IPv4 address for this user.
        :param pulumi.Input[str] hostname: The hostname of the user.
        :param pulumi.Input[str] ip: The IP address of the user.
        :param pulumi.Input[str] mac: The MAC address of the user.
        :param pulumi.Input[str] name: The name of the user.
        :param pulumi.Input[str] network_id: The network ID for this user.
        :param pulumi.Input[str] note: A note with additional information for the user.
        :param pulumi.Input[str] site: The name of the site to associate the user with.
        :param pulumi.Input[bool] skip_forget_on_destroy: Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
        :param pulumi.Input[str] user_group_id: The user group ID for the user.
        """
        # Only record fields the caller actually supplied, so unset state
        # stays absent rather than becoming an explicit None.
        if allow_existing is not None:
            pulumi.set(__self__, "allow_existing", allow_existing)
        if blocked is not None:
            pulumi.set(__self__, "blocked", blocked)
        if dev_id_override is not None:
            pulumi.set(__self__, "dev_id_override", dev_id_override)
        if fixed_ip is not None:
            pulumi.set(__self__, "fixed_ip", fixed_ip)
        if hostname is not None:
            pulumi.set(__self__, "hostname", hostname)
        if ip is not None:
            pulumi.set(__self__, "ip", ip)
        if mac is not None:
            pulumi.set(__self__, "mac", mac)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if network_id is not None:
            pulumi.set(__self__, "network_id", network_id)
        if note is not None:
            pulumi.set(__self__, "note", note)
        if site is not None:
            pulumi.set(__self__, "site", site)
        if skip_forget_on_destroy is not None:
            pulumi.set(__self__, "skip_forget_on_destroy", skip_forget_on_destroy)
        if user_group_id is not None:
            pulumi.set(__self__, "user_group_id", user_group_id)

    @property
    @pulumi.getter(name="allowExisting")
    def allow_existing(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
        """
        return pulumi.get(self, "allow_existing")

    @allow_existing.setter
    def allow_existing(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_existing", value)

    @property
    @pulumi.getter
    def blocked(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether this user should be blocked from the network.
        """
        return pulumi.get(self, "blocked")

    @blocked.setter
    def blocked(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "blocked", value)

    @property
    @pulumi.getter(name="devIdOverride")
    def dev_id_override(self) -> Optional[pulumi.Input[int]]:
        """
        Override the device fingerprint.
        """
        return pulumi.get(self, "dev_id_override")

    @dev_id_override.setter
    def dev_id_override(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dev_id_override", value)

    @property
    @pulumi.getter(name="fixedIp")
    def fixed_ip(self) -> Optional[pulumi.Input[str]]:
        """
        A fixed IPv4 address for this user.
        """
        return pulumi.get(self, "fixed_ip")

    @fixed_ip.setter
    def fixed_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fixed_ip", value)

    @property
    @pulumi.getter
    def hostname(self) -> Optional[pulumi.Input[str]]:
        """
        The hostname of the user.
        """
        return pulumi.get(self, "hostname")

    @hostname.setter
    def hostname(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hostname", value)

    @property
    @pulumi.getter
    def ip(self) -> Optional[pulumi.Input[str]]:
        """
        The IP address of the user.
        """
        return pulumi.get(self, "ip")

    @ip.setter
    def ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip", value)

    @property
    @pulumi.getter
    def mac(self) -> Optional[pulumi.Input[str]]:
        """
        The MAC address of the user.
        """
        return pulumi.get(self, "mac")

    @mac.setter
    def mac(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mac", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the user.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="networkId")
    def network_id(self) -> Optional[pulumi.Input[str]]:
        """
        The network ID for this user.
        """
        return pulumi.get(self, "network_id")

    @network_id.setter
    def network_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_id", value)

    @property
    @pulumi.getter
    def note(self) -> Optional[pulumi.Input[str]]:
        """
        A note with additional information for the user.
        """
        return pulumi.get(self, "note")

    @note.setter
    def note(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "note", value)

    @property
    @pulumi.getter
    def site(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the site to associate the user with.
        """
        return pulumi.get(self, "site")

    @site.setter
    def site(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "site", value)

    @property
    @pulumi.getter(name="skipForgetOnDestroy")
    def skip_forget_on_destroy(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
        """
        return pulumi.get(self, "skip_forget_on_destroy")

    @skip_forget_on_destroy.setter
    def skip_forget_on_destroy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_forget_on_destroy", value)

    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The user group ID for the user.
        """
        return pulumi.get(self, "user_group_id")

    @user_group_id.setter
    def user_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_group_id", value)
class User(pulumi.CustomResource):
    # Auto-generated Pulumi resource wrapper for a UniFi network client.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 allow_existing: Optional[pulumi.Input[bool]] = None,
                 blocked: Optional[pulumi.Input[bool]] = None,
                 dev_id_override: Optional[pulumi.Input[int]] = None,
                 fixed_ip: Optional[pulumi.Input[str]] = None,
                 mac: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_id: Optional[pulumi.Input[str]] = None,
                 note: Optional[pulumi.Input[str]] = None,
                 site: Optional[pulumi.Input[str]] = None,
                 skip_forget_on_destroy: Optional[pulumi.Input[bool]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        `User` manages a user (or "client" in the UI) of the network, these are identified by unique MAC addresses.
        Users are created in the controller when observed on the network, so the resource defaults to allowing itself to just take over management of a MAC address, but this can be turned off.
        ## Example Usage
        ```python
        import pulumi
        import pulumiverse_unifi as unifi
        test = unifi.User("test",
            mac="01:23:45:67:89:AB",
            note="my note",
            fixed_ip="10.0.0.50",
            network_id=unifi_network["my_vlan"]["id"])
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] allow_existing: Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
        :param pulumi.Input[bool] blocked: Specifies whether this user should be blocked from the network.
        :param pulumi.Input[int] dev_id_override: Override the device fingerprint.
        :param pulumi.Input[str] fixed_ip: A fixed IPv4 address for this user.
        :param pulumi.Input[str] mac: The MAC address of the user.
        :param pulumi.Input[str] name: The name of the user.
        :param pulumi.Input[str] network_id: The network ID for this user.
        :param pulumi.Input[str] note: A note with additional information for the user.
        :param pulumi.Input[str] site: The name of the site to associate the user with.
        :param pulumi.Input[bool] skip_forget_on_destroy: Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
        :param pulumi.Input[str] user_group_id: The user group ID for the user.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UserArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        `User` manages a user (or "client" in the UI) of the network, these are identified by unique MAC addresses.
        Users are created in the controller when observed on the network, so the resource defaults to allowing itself to just take over management of a MAC address, but this can be turned off.
        ## Example Usage
        ```python
        import pulumi
        import pulumiverse_unifi as unifi
        test = unifi.User("test",
            mac="01:23:45:67:89:AB",
            note="my note",
            fixed_ip="10.0.0.50",
            network_id=unifi_network["my_vlan"]["id"])
        ```
        :param str resource_name: The name of the resource.
        :param UserArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: args-object form vs.
        # keyword-argument form; both funnel into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 allow_existing: Optional[pulumi.Input[bool]] = None,
                 blocked: Optional[pulumi.Input[bool]] = None,
                 dev_id_override: Optional[pulumi.Input[int]] = None,
                 fixed_ip: Optional[pulumi.Input[str]] = None,
                 mac: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_id: Optional[pulumi.Input[str]] = None,
                 note: Optional[pulumi.Input[str]] = None,
                 site: Optional[pulumi.Input[str]] = None,
                 skip_forget_on_destroy: Optional[pulumi.Input[bool]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Actual constructor body shared by both overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.plugin_download_url is None:
            opts.plugin_download_url = _utilities.get_plugin_download_url()
        # opts.id set means we are adopting existing state; then __props__ is
        # allowed (and arguments are ignored by the engine).
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = UserArgs.__new__(UserArgs)
            __props__.__dict__["allow_existing"] = allow_existing
            __props__.__dict__["blocked"] = blocked
            __props__.__dict__["dev_id_override"] = dev_id_override
            __props__.__dict__["fixed_ip"] = fixed_ip
            # mac is the only required input (unless rehydrating via URN).
            if mac is None and not opts.urn:
                raise TypeError("Missing required property 'mac'")
            __props__.__dict__["mac"] = mac
            __props__.__dict__["name"] = name
            __props__.__dict__["network_id"] = network_id
            __props__.__dict__["note"] = note
            __props__.__dict__["site"] = site
            __props__.__dict__["skip_forget_on_destroy"] = skip_forget_on_destroy
            __props__.__dict__["user_group_id"] = user_group_id
            # Output-only attributes start unknown and are filled by the provider.
            __props__.__dict__["hostname"] = None
            __props__.__dict__["ip"] = None
        super(User, __self__).__init__(
            'unifi:index/user:User',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            allow_existing: Optional[pulumi.Input[bool]] = None,
            blocked: Optional[pulumi.Input[bool]] = None,
            dev_id_override: Optional[pulumi.Input[int]] = None,
            fixed_ip: Optional[pulumi.Input[str]] = None,
            hostname: Optional[pulumi.Input[str]] = None,
            ip: Optional[pulumi.Input[str]] = None,
            mac: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            network_id: Optional[pulumi.Input[str]] = None,
            note: Optional[pulumi.Input[str]] = None,
            site: Optional[pulumi.Input[str]] = None,
            skip_forget_on_destroy: Optional[pulumi.Input[bool]] = None,
            user_group_id: Optional[pulumi.Input[str]] = None) -> 'User':
        """
        Get an existing User resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] allow_existing: Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
        :param pulumi.Input[bool] blocked: Specifies whether this user should be blocked from the network.
        :param pulumi.Input[int] dev_id_override: Override the device fingerprint.
        :param pulumi.Input[str] fixed_ip: A fixed IPv4 address for this user.
        :param pulumi.Input[str] hostname: The hostname of the user.
        :param pulumi.Input[str] ip: The IP address of the user.
        :param pulumi.Input[str] mac: The MAC address of the user.
        :param pulumi.Input[str] name: The name of the user.
        :param pulumi.Input[str] network_id: The network ID for this user.
        :param pulumi.Input[str] note: A note with additional information for the user.
        :param pulumi.Input[str] site: The name of the site to associate the user with.
        :param pulumi.Input[bool] skip_forget_on_destroy: Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
        :param pulumi.Input[str] user_group_id: The user group ID for the user.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _UserState.__new__(_UserState)
        __props__.__dict__["allow_existing"] = allow_existing
        __props__.__dict__["blocked"] = blocked
        __props__.__dict__["dev_id_override"] = dev_id_override
        __props__.__dict__["fixed_ip"] = fixed_ip
        __props__.__dict__["hostname"] = hostname
        __props__.__dict__["ip"] = ip
        __props__.__dict__["mac"] = mac
        __props__.__dict__["name"] = name
        __props__.__dict__["network_id"] = network_id
        __props__.__dict__["note"] = note
        __props__.__dict__["site"] = site
        __props__.__dict__["skip_forget_on_destroy"] = skip_forget_on_destroy
        __props__.__dict__["user_group_id"] = user_group_id
        return User(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="allowExisting")
    def allow_existing(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
        """
        return pulumi.get(self, "allow_existing")

    @property
    @pulumi.getter
    def blocked(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether this user should be blocked from the network.
        """
        return pulumi.get(self, "blocked")

    @property
    @pulumi.getter(name="devIdOverride")
    def dev_id_override(self) -> pulumi.Output[Optional[int]]:
        """
        Override the device fingerprint.
        """
        return pulumi.get(self, "dev_id_override")

    @property
    @pulumi.getter(name="fixedIp")
    def fixed_ip(self) -> pulumi.Output[Optional[str]]:
        """
        A fixed IPv4 address for this user.
        """
        return pulumi.get(self, "fixed_ip")

    @property
    @pulumi.getter
    def hostname(self) -> pulumi.Output[str]:
        """
        The hostname of the user.
        """
        return pulumi.get(self, "hostname")

    @property
    @pulumi.getter
    def ip(self) -> pulumi.Output[str]:
        """
        The IP address of the user.
        """
        return pulumi.get(self, "ip")

    @property
    @pulumi.getter
    def mac(self) -> pulumi.Output[str]:
        """
        The MAC address of the user.
        """
        return pulumi.get(self, "mac")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the user.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkId")
    def network_id(self) -> pulumi.Output[Optional[str]]:
        """
        The network ID for this user.
        """
        return pulumi.get(self, "network_id")

    @property
    @pulumi.getter
    def note(self) -> pulumi.Output[Optional[str]]:
        """
        A note with additional information for the user.
        """
        return pulumi.get(self, "note")

    @property
    @pulumi.getter
    def site(self) -> pulumi.Output[str]:
        """
        The name of the site to associate the user with.
        """
        return pulumi.get(self, "site")

    @property
    @pulumi.getter(name="skipForgetOnDestroy")
    def skip_forget_on_destroy(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
        """
        return pulumi.get(self, "skip_forget_on_destroy")

    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> pulumi.Output[Optional[str]]:
        """
        The user group ID for the user.
        """
        return pulumi.get(self, "user_group_id")
| 39.777935
| 192
| 0.623262
| 3,503
| 28,123
| 4.781901
| 0.058521
| 0.104412
| 0.087756
| 0.089308
| 0.895469
| 0.884485
| 0.859889
| 0.848606
| 0.842218
| 0.830338
| 0
| 0.001941
| 0.26729
| 28,123
| 706
| 193
| 39.834278
| 0.810977
| 0.279949
| 0
| 0.808717
| 1
| 0
| 0.077059
| 0.011744
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164649
| false
| 0.002421
| 0.012107
| 0
| 0.276029
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7f090cbbe79053c92893788470dd061620f72fd9
| 21,222
|
py
|
Python
|
src/make_model.py
|
statsu1990/ReZero-Cifar100
|
b7b7c24293726283b346dac5a3fa15bd5f0de74c
|
[
"MIT"
] | 3
|
2020-03-29T06:30:03.000Z
|
2020-04-03T02:09:03.000Z
|
src/make_model.py
|
statsu1990/ReZero-Cifar100
|
b7b7c24293726283b346dac5a3fa15bd5f0de74c
|
[
"MIT"
] | null | null | null |
src/make_model.py
|
statsu1990/ReZero-Cifar100
|
b7b7c24293726283b346dac5a3fa15bd5f0de74c
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import albumentations as alb
from albumentations.augmentations import transforms as albtr
from albumentations.pytorch import ToTensor as albToTensor
import torch.nn as nn
import torch.optim as optim
from data import cifar, torch_data_utils
from model import preact_resnet, rezero_preact_resnet, rezero2_preact_resnet
from train import training
def get_checkpoint(path):
    """Deserialize a saved training checkpoint, remapping every tensor onto CPU.

    The ``map_location`` callable returns the storage unchanged, which keeps
    GPU-saved checkpoints loadable on CPU-only machines.
    """
    return torch.load(path, map_location=lambda storage, loc: storage)
def make_PreactResnet():
    """Train a PreActResNet-18 on CIFAR-100 and save the final weights."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None #'checkpoint', None
    FINE_TURNING = False  # NOTE(review): presumably "FINE_TUNING"; local-only name
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None

    ## data
    # transformer
    tr_transformer = alb.Compose([
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
        # CIFAR-100 per-channel mean / std
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    ts_transformer = alb.Compose([
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    # dataset
    tr_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer)
    ts_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer)

    ## model
    model = preact_resnet.PreActResNet18(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # NOTE(review): unlike make_RezeroPreactResnet, the model is never moved
    # to GPU here — confirm training.train_model handles device placement.
    USE_LABEL = False

    ## training
    TR_BATCH_SIZE = 128
    TS_BATCH_SIZE = 512
    tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
    ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
    LR = 0.1
    opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    if CP is not None:
        # Resume optimizer state only when continuing the same run.
        if not FINE_TURNING:
            opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = 0 if CP is None or FINE_TURNING else CP['epoch']
    EPOCHS = 200
    warmup_epoch=1
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2) #learning rate decay
    filename_head = 'preact18_'
    model = training.train_model(model, tr_loader, ts_loader, USE_LABEL,
                                 opt, tr_criterion, vl_criterion,
                                 grad_accum_steps, start_epoch, EPOCHS,
                                 warmup_epoch, step_scheduler, filename_head)
    # save
    torch.save(model.state_dict(), filename_head + '_model')
    return
def make_RezeroPreactResnet():
    """Train a ReZero PreActResNet-18 on CIFAR-100 and save the final weights."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None #'checkpoint', None
    FINE_TURNING = False  # NOTE(review): presumably "FINE_TUNING"; local-only name
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None

    ## data
    # transformer
    tr_transformer = alb.Compose([
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
        # CIFAR-100 per-channel mean / std
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    ts_transformer = alb.Compose([
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    # dataset
    tr_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer)
    ts_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer)

    ## model
    model = rezero_preact_resnet.PreActResNet18(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # NOTE(review): this variant (and rezero2) explicitly moves the model to
    # GPU while the other make_* functions do not — confirm which is intended.
    model = model.cuda()
    USE_LABEL = False

    ## training
    TR_BATCH_SIZE = 128
    TS_BATCH_SIZE = 512
    tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
    ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
    LR = 0.1
    opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    if CP is not None:
        # Resume optimizer state only when continuing the same run.
        if not FINE_TURNING:
            opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = 0 if CP is None or FINE_TURNING else CP['epoch']
    EPOCHS = 200
    warmup_epoch=1
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2) #learning rate decay
    filename_head = 'rezero_preact18_'
    model = training.train_model(model, tr_loader, ts_loader, USE_LABEL,
                                 opt, tr_criterion, vl_criterion,
                                 grad_accum_steps, start_epoch, EPOCHS,
                                 warmup_epoch, step_scheduler, filename_head)
    # save
    torch.save(model.state_dict(), filename_head + '_model')
    return
def make_Rezero2PreactResnet():
    """Train the second ReZero PreActResNet-18 variant on CIFAR-100 and save weights."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None #'checkpoint', None
    FINE_TURNING = False  # NOTE(review): presumably "FINE_TUNING"; local-only name
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None

    ## data
    # transformer
    tr_transformer = alb.Compose([
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
        # CIFAR-100 per-channel mean / std
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    ts_transformer = alb.Compose([
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    # dataset
    tr_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer)
    ts_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer)

    ## model
    model = rezero2_preact_resnet.PreActResNet18(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # Explicit GPU placement (same as make_RezeroPreactResnet).
    model = model.cuda()
    USE_LABEL = False

    ## training
    TR_BATCH_SIZE = 128
    TS_BATCH_SIZE = 512
    tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
    ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
    LR = 0.1
    opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    if CP is not None:
        # Resume optimizer state only when continuing the same run.
        if not FINE_TURNING:
            opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = 0 if CP is None or FINE_TURNING else CP['epoch']
    EPOCHS = 200
    warmup_epoch=1
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2) #learning rate decay
    filename_head = 'rezero2_preact18_'
    model = training.train_model(model, tr_loader, ts_loader, USE_LABEL,
                                 opt, tr_criterion, vl_criterion,
                                 grad_accum_steps, start_epoch, EPOCHS,
                                 warmup_epoch, step_scheduler, filename_head)
    # save
    torch.save(model.state_dict(), filename_head + '_model')
    return
def make_PreactResnet50():
    """Train a PreActResNet-50 on CIFAR-100 and save the final weights."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None #'checkpoint', None
    FINE_TURNING = False  # NOTE(review): presumably "FINE_TUNING"; local-only name
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None

    ## data
    # transformer
    tr_transformer = alb.Compose([
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
        # CIFAR-100 per-channel mean / std
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    ts_transformer = alb.Compose([
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    # dataset
    tr_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer)
    ts_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer)

    ## model
    model = preact_resnet.PreActResNet50(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # NOTE(review): no explicit .cuda() here (unlike the rezero-18 variants).
    USE_LABEL = False

    ## training
    TR_BATCH_SIZE = 128
    TS_BATCH_SIZE = 512
    tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
    ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
    LR = 0.1
    opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    if CP is not None:
        # Resume optimizer state only when continuing the same run.
        if not FINE_TURNING:
            opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = 0 if CP is None or FINE_TURNING else CP['epoch']
    EPOCHS = 200
    warmup_epoch=1
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2) #learning rate decay
    filename_head = 'preact50_'
    model = training.train_model(model, tr_loader, ts_loader, USE_LABEL,
                                 opt, tr_criterion, vl_criterion,
                                 grad_accum_steps, start_epoch, EPOCHS,
                                 warmup_epoch, step_scheduler, filename_head)
    # save
    torch.save(model.state_dict(), filename_head + '_model')
    return
def make_RezeroPreactResnet50():
    """Train a ReZero PreActResNet-50 on CIFAR-100 and save the final weights."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None #'checkpoint', None
    FINE_TURNING = False  # NOTE(review): presumably "FINE_TUNING"; local-only name
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None

    ## data
    # transformer
    tr_transformer = alb.Compose([
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
        # CIFAR-100 per-channel mean / std
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    ts_transformer = alb.Compose([
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    # dataset
    tr_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer)
    ts_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer)

    ## model
    model = rezero_preact_resnet.PreActResNet50(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # NOTE(review): no explicit .cuda() here (unlike the rezero-18 variants).
    USE_LABEL = False

    ## training
    TR_BATCH_SIZE = 128
    TS_BATCH_SIZE = 512
    tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
    ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
    LR = 0.1
    opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    if CP is not None:
        # Resume optimizer state only when continuing the same run.
        if not FINE_TURNING:
            opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = 0 if CP is None or FINE_TURNING else CP['epoch']
    EPOCHS = 200
    warmup_epoch=1
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2) #learning rate decay
    filename_head = 'rezero_preact50_'
    model = training.train_model(model, tr_loader, ts_loader, USE_LABEL,
                                 opt, tr_criterion, vl_criterion,
                                 grad_accum_steps, start_epoch, EPOCHS,
                                 warmup_epoch, step_scheduler, filename_head)
    # save
    torch.save(model.state_dict(), filename_head + '_model')
    return
def make_PreactResnet152():
    """Train a PreActResNet-152 on CIFAR-100 and save the final weights."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None #'checkpoint', None
    FINE_TURNING = False  # NOTE(review): presumably "FINE_TUNING"; local-only name
    CP = get_checkpoint(CHECKPOINT_PATH) if CHECKPOINT_PATH is not None else None

    ## data
    # transformer
    tr_transformer = alb.Compose([
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
        # CIFAR-100 per-channel mean / std
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    ts_transformer = alb.Compose([
        albtr.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)),
        albToTensor()
        ])
    # dataset
    tr_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, tr_transformer)
    ts_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, ts_transformer)

    ## model
    model = preact_resnet.PreActResNet152(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    # NOTE(review): no explicit .cuda() here (unlike the rezero-18 variants).
    USE_LABEL = False

    ## training
    TR_BATCH_SIZE = 128
    TS_BATCH_SIZE = 512
    tr_loader = torch_data_utils.get_dataloader(tr_ds, TR_BATCH_SIZE)
    ts_loader = torch_data_utils.get_dataloader(ts_ds, TS_BATCH_SIZE, shuffle=False)
    LR = 0.1
    opt = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    if CP is not None:
        # Resume optimizer state only when continuing the same run.
        if not FINE_TURNING:
            opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = 0 if CP is None or FINE_TURNING else CP['epoch']
    EPOCHS = 200
    warmup_epoch=1
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2) #learning rate decay
    filename_head = 'preact152_'
    model = training.train_model(model, tr_loader, ts_loader, USE_LABEL,
                                 opt, tr_criterion, vl_criterion,
                                 grad_accum_steps, start_epoch, EPOCHS,
                                 warmup_epoch, step_scheduler, filename_head)
    # save
    torch.save(model.state_dict(), filename_head + '_model')
    return
def make_RezeroPreactResnet152():
    """Train a ReZero PreActResNet152 on CIFAR-100 and save its weights to disk."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None  # e.g. 'checkpoint' to resume training, or None
    FINE_TURNING = False
    CP = None if CHECKPOINT_PATH is None else get_checkpoint(CHECKPOINT_PATH)

    ## data
    # CIFAR-100 per-channel statistics used for normalization.
    mean = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    std = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
    augmentations = [
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
    ]
    train_tf = alb.Compose(augmentations + [albtr.Normalize(mean, std), albToTensor()])
    test_tf = alb.Compose([albtr.Normalize(mean, std), albToTensor()])

    train_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, train_tf)
    test_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, test_tf)

    ## model
    model = rezero_preact_resnet.PreActResNet152(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    USE_LABEL = False

    ## training
    train_loader = torch_data_utils.get_dataloader(train_ds, 128)
    test_loader = torch_data_utils.get_dataloader(test_ds, 512, shuffle=False)

    opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    # Restore the optimizer state only when resuming, not when fine-tuning.
    if CP is not None and not FINE_TURNING:
        opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = CP['epoch'] if CP is not None and not FINE_TURNING else 0
    EPOCHS = 200
    warmup_epoch = 1
    # Step-wise learning-rate decay at the classic CIFAR milestones.
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2)

    filename_head = 'rezero_preact152_'
    model = training.train_model(
        model, train_loader, test_loader, USE_LABEL,
        opt, tr_criterion, vl_criterion,
        grad_accum_steps, start_epoch, EPOCHS,
        warmup_epoch, step_scheduler, filename_head,
    )
    # save final weights
    torch.save(model.state_dict(), filename_head + '_model')
def make_Rezero2PreactResnet152():
    """Train a ReZero(v2) PreActResNet152 on CIFAR-100 and save its weights to disk."""
    DOWNLOAD = False
    CHECKPOINT_PATH = None  # e.g. 'checkpoint' to resume training, or None
    FINE_TURNING = False
    CP = None if CHECKPOINT_PATH is None else get_checkpoint(CHECKPOINT_PATH)

    ## data
    # CIFAR-100 per-channel statistics used for normalization.
    mean = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    std = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
    augmentations = [
        albtr.Flip(p=0.5),
        albtr.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15, p=0.5),
    ]
    train_tf = alb.Compose(augmentations + [albtr.Normalize(mean, std), albToTensor()])
    test_tf = alb.Compose([albtr.Normalize(mean, std), albToTensor()])

    train_ds = cifar.get_dataset_cifar100(True, DOWNLOAD, torch_data_utils.ImgDataset, train_tf)
    test_ds = cifar.get_dataset_cifar100(False, DOWNLOAD, torch_data_utils.ImgDataset, test_tf)

    ## model
    model = rezero2_preact_resnet.PreActResNet152(num_classes=100)
    if CP is not None:
        model.load_state_dict(CP['state_dict'])
    USE_LABEL = False

    ## training
    train_loader = torch_data_utils.get_dataloader(train_ds, 128)
    test_loader = torch_data_utils.get_dataloader(test_ds, 512, shuffle=False)

    opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    # Restore the optimizer state only when resuming, not when fine-tuning.
    if CP is not None and not FINE_TURNING:
        opt.load_state_dict(CP['optimizer'])
    tr_criterion = nn.CrossEntropyLoss()
    vl_criterion = nn.CrossEntropyLoss()
    grad_accum_steps = 1
    start_epoch = CP['epoch'] if CP is not None and not FINE_TURNING else 0
    EPOCHS = 200
    warmup_epoch = 1
    # Step-wise learning-rate decay at the classic CIFAR milestones.
    step_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 160], gamma=0.2)

    filename_head = 'rezero2_preact152_'
    model = training.train_model(
        model, train_loader, test_loader, USE_LABEL,
        opt, tr_criterion, vl_criterion,
        grad_accum_steps, start_epoch, EPOCHS,
        warmup_epoch, step_scheduler, filename_head,
    )
    # save final weights
    torch.save(model.state_dict(), filename_head + '_model')
| 40.811538
| 171
| 0.621949
| 2,451
| 21,222
| 5.126071
| 0.05916
| 0.023639
| 0.036772
| 0.03311
| 0.952802
| 0.952802
| 0.952802
| 0.952802
| 0.952802
| 0.952802
| 0
| 0.13955
| 0.291584
| 21,222
| 519
| 172
| 40.890173
| 0.696155
| 0.030911
| 0
| 0.891892
| 0
| 0
| 0.017636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024324
| false
| 0
| 0.035135
| 0
| 0.083784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f135ff1781dd7ddc1429b2ed006ab83c03f03a5
| 5,823
|
py
|
Python
|
tests/synthdefs/test_synthdefs_SynthDef_lifecycle.py
|
butayama/supriya
|
0c197324ecee4232381221880d1f40e109bb756c
|
[
"MIT"
] | 191
|
2015-11-13T02:28:42.000Z
|
2022-03-29T10:26:44.000Z
|
tests/synthdefs/test_synthdefs_SynthDef_lifecycle.py
|
butayama/supriya
|
0c197324ecee4232381221880d1f40e109bb756c
|
[
"MIT"
] | 130
|
2016-01-04T16:59:02.000Z
|
2022-02-26T15:37:20.000Z
|
tests/synthdefs/test_synthdefs_SynthDef_lifecycle.py
|
butayama/supriya
|
0c197324ecee4232381221880d1f40e109bb756c
|
[
"MIT"
] | 22
|
2016-05-04T10:32:16.000Z
|
2022-02-26T19:22:45.000Z
|
import supriya
def test_unaggregated_anonymous(server):
    """An anonymous SynthDef can be sent to and freed from the server directly."""
    with supriya.SynthDefBuilder(frequency=440) as builder:
        oscillator = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
        supriya.ugens.Out.ar(bus=0, source=oscillator)
    synthdef = builder.build()
    assert synthdef not in server
    # Allocation should emit exactly one /d_recv carrying the compiled def.
    with server.osc_protocol.capture() as capture:
        synthdef.allocate(server=server)
    assert synthdef in server
    sent = [message for _, message in capture.sent_messages]
    assert sent == [supriya.osc.OscMessage("/d_recv", synthdef.compile())]
    # Freeing should emit exactly one /d_free keyed by the anonymous name.
    with server.osc_protocol.capture() as capture:
        synthdef.free()
    assert synthdef not in server
    sent = [message for _, message in capture.sent_messages]
    assert sent == [supriya.osc.OscMessage("/d_free", synthdef.anonymous_name)]
def test_unaggregated_named(server):
    """A named SynthDef can be sent to and freed from the server directly."""
    with supriya.SynthDefBuilder(frequency=440) as builder:
        oscillator = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
        supriya.ugens.Out.ar(bus=0, source=oscillator)
    synthdef = builder.build(name="test-synthdef")
    assert synthdef not in server
    # Allocation should emit exactly one /d_recv carrying the compiled def.
    with server.osc_protocol.capture() as capture:
        synthdef.allocate(server=server)
    assert synthdef in server
    sent = [message for _, message in capture.sent_messages]
    assert sent == [supriya.osc.OscMessage("/d_recv", synthdef.compile())]
    # Freeing should emit exactly one /d_free keyed by the given name.
    with server.osc_protocol.capture() as capture:
        synthdef.free()
    assert synthdef not in server
    sent = [message for _, message in capture.sent_messages]
    assert sent == [supriya.osc.OscMessage("/d_free", synthdef.name)]
def test_aggregated_anonymous(server):
    """Synths sharing an anonymous SynthDef allocate and re-allocate it lazily."""
    with supriya.SynthDefBuilder(frequency=440) as builder:
        source = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
        supriya.ugens.Out.ar(bus=0, source=source)
    synthdef = builder.build()
    assert synthdef not in server
    synth_a = supriya.Synth(synthdef=synthdef, frequency=666)
    synth_b = supriya.Synth(synthdef=synthdef, frequency=777)
    synth_c = supriya.Synth(synthdef=synthdef, frequency=888)
    # allocate synthdef on node allocation
    with server.osc_protocol.capture() as transcript:
        synth_a.allocate(server=server)
    assert synthdef in server
    # NOTE(review): the /s_new is nested inside /d_recv — presumably sent as
    # the server's completion message so the synth starts only once the
    # definition has been received; confirm against supriya's OSC handling.
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage(
            "/d_recv",
            synthdef.compile(),
            supriya.osc.OscMessage(
                "/s_new", synthdef.anonymous_name, 1000, 0, 1, "frequency", 666.0
            ),
        )
    ]
    # don't need to re-allocate
    with server.osc_protocol.capture() as transcript:
        synth_b.allocate(server=server)
    assert synthdef in server
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage(
            "/s_new", synthdef.anonymous_name, 1001, 0, 1, "frequency", 777.0
        )
    ]
    # just free the synthdef
    with server.osc_protocol.capture() as transcript:
        synthdef.free()
    assert synthdef not in server
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage("/d_free", synthdef.anonymous_name)
    ]
    # allocate synthdef (again) on node allocation
    with server.osc_protocol.capture() as transcript:
        synth_c.allocate(server=server)
    assert synthdef in server
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage(
            "/d_recv",
            synthdef.compile(),
            supriya.osc.OscMessage(
                "/s_new", synthdef.anonymous_name, 1002, 0, 1, "frequency", 888.0
            ),
        )
    ]
def test_aggregated_named(server):
    """Synths sharing a named SynthDef allocate and re-allocate it lazily."""
    with supriya.SynthDefBuilder(frequency=440) as builder:
        source = supriya.ugens.SinOsc.ar(frequency=builder["frequency"])
        supriya.ugens.Out.ar(bus=0, source=source)
    synthdef = builder.build(name="test-synthdef")
    assert synthdef not in server
    synth_a = supriya.Synth(synthdef=synthdef, frequency=666)
    synth_b = supriya.Synth(synthdef=synthdef, frequency=777)
    synth_c = supriya.Synth(synthdef=synthdef, frequency=888)
    # allocate synthdef on node allocation
    with server.osc_protocol.capture() as transcript:
        synth_a.allocate(server=server)
    assert synthdef in server
    # NOTE(review): the /s_new rides inside /d_recv — presumably as the
    # server's completion message so the synth starts only after the
    # definition is received; confirm against supriya's OSC handling.
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage(
            "/d_recv",
            synthdef.compile(),
            supriya.osc.OscMessage(
                "/s_new", synthdef.name, 1000, 0, 1, "frequency", 666.0
            ),
        )
    ]
    # don't need to re-allocate
    with server.osc_protocol.capture() as transcript:
        synth_b.allocate(server=server)
    assert synthdef in server
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage("/s_new", synthdef.name, 1001, 0, 1, "frequency", 777.0)
    ]
    # just free the synthdef
    with server.osc_protocol.capture() as transcript:
        synthdef.free()
    assert synthdef not in server
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage("/d_free", synthdef.name)
    ]
    # allocate synthdef (again) on node allocation
    with server.osc_protocol.capture() as transcript:
        synth_c.allocate(server=server)
    assert synthdef in server
    assert [message for timestamp, message in transcript.sent_messages] == [
        supriya.osc.OscMessage(
            "/d_recv",
            synthdef.compile(),
            supriya.osc.OscMessage(
                "/s_new", synthdef.name, 1002, 0, 1, "frequency", 888.0
            ),
        )
    ]
| 37.326923
| 87
| 0.666838
| 687
| 5,823
| 5.557496
| 0.098981
| 0.06286
| 0.083814
| 0.066003
| 0.977737
| 0.977737
| 0.977737
| 0.967784
| 0.965427
| 0.965427
| 0
| 0.020959
| 0.229778
| 5,823
| 155
| 88
| 37.567742
| 0.830323
| 0.045166
| 0
| 0.782946
| 0
| 0
| 0.039993
| 0
| 0
| 0
| 0
| 0
| 0.217054
| 1
| 0.031008
| false
| 0
| 0.007752
| 0
| 0.03876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
61360b112cfde882f088c50d77334c32380948ea
| 11,594
|
py
|
Python
|
tts/integration_test/tts_integration.py
|
aws-robotics/tts-ros2
|
f8278df1634b3806f26fe51b0af6f11e0cbcdafb
|
[
"Apache-2.0"
] | 8
|
2019-08-07T14:07:13.000Z
|
2021-10-30T02:48:48.000Z
|
tts/integration_test/tts_integration.py
|
aws-robotics/tts-ros2
|
f8278df1634b3806f26fe51b0af6f11e0cbcdafb
|
[
"Apache-2.0"
] | 29
|
2019-08-20T21:55:02.000Z
|
2021-12-15T16:05:45.000Z
|
tts/integration_test/tts_integration.py
|
aws-robotics/tts-ros2
|
f8278df1634b3806f26fe51b0af6f11e0cbcdafb
|
[
"Apache-2.0"
] | 5
|
2019-08-29T22:34:18.000Z
|
2021-10-30T02:48:41.000Z
|
#!/usr/bin/env python
# Copyright (c) 2018, Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import unittest
import rclpy
import launch_testing
from launch import LaunchDescription
from launch.actions import OpaqueFunction
from launch_ros.actions import Node
from tts_interfaces.srv import Polly
from tts_interfaces.srv import Synthesizer
def generate_test_description(ready_fn):
    """Build the launch description that starts both TTS server nodes.

    Returns the description together with ``locals()`` — presumably so
    launch_testing can inject the local names (``polly_server``,
    ``synthesizer_server``) into the test context; the variable names here
    are therefore part of the interface and must not be renamed.
    """
    polly_server = Node(package='tts', node_executable='polly_server', additional_env={'PYTHONUNBUFFERED': '1'}, output='screen')
    synthesizer_server = Node(package='tts', node_executable='synthesizer_server', additional_env={'PYTHONUNBUFFERED': '1'}, output='screen')
    launch_description = LaunchDescription([
        polly_server,
        synthesizer_server,
        # Signals launch_testing that the processes are up and tests may run.
        OpaqueFunction(function=lambda context: ready_fn()),
    ])
    return launch_description, locals()
class TestPlainText(unittest.TestCase):
    """Integration tests exercising the polly and synthesizer ROS services.

    Each test synthesizes a short phrase and verifies the JSON response
    structure plus the audio file written to disk (checked via ``file(1)``).
    """

    # Runtime strings shared by the tests; kept byte-identical to the
    # original literals so the services receive the same input.
    TEST_TEXT = 'Mary has a little lamb, little lamb, little lamb.'
    SSML_TEXT = '<speak>Mary has a little lamb, little lamb, little lamb.</speak>'

    @classmethod
    def setUpClass(cls):
        rclpy.init()

    @classmethod
    def tearDownClass(cls):
        rclpy.shutdown()

    def _call_service(self, node, service_type, service_name, request):
        """Call a ROS service, spinning until the response arrives.

        :param node: rclpy node used to create the client.
        :param service_type: service class (Polly or Synthesizer).
        :param service_name: service name to connect to.
        :param request: pre-built request message.
        :return: the service response (never None).
        """
        client = node.create_client(service_type, service_name)
        retries = 0
        while not client.wait_for_service(timeout_sec=2.0):
            retries += 1
            # assertFalse replaces failIf, a deprecated alias that was
            # removed from unittest in Python 3.12.
            self.assertFalse(retries > 3, 'service is not available')
        future = client.call_async(request)
        while rclpy.ok():
            rclpy.spin_once(node)
            if future.done():
                self.assertIsNotNone(future.result(), 'nothing is returned')
                break
            print('Waiting for service to be done.')
        return future.result()

    def _check_audio_result(self, res, response_type, audio_type='audio/pcm',
                            extension='.wav', magic=r'.*WAVE audio.*'):
        """Verify a synthesis response and the audio file it points at.

        :param res: service response whose ``result`` field is a JSON string.
        :param response_type: expected response class of ``res``.
        :param audio_type: expected 'Audio Type' value in the JSON result.
        :param extension: expected suffix of the written audio file.
        :param magic: regex matched against ``file(1)`` output for the file.
        """
        import re
        import subprocess
        self.assertIsNotNone(res)
        self.assertTrue(type(res) is response_type)
        r = json.loads(res.result)
        self.assertIn('Audio Type', r, 'result should contain audio type')
        self.assertIn('Audio File', r, 'result should contain file path')
        self.assertIn('Amazon Polly Response Metadata', r, 'result should contain metadata')
        md = r['Amazon Polly Response Metadata']
        self.assertTrue("'HTTPStatusCode': 200," in md)
        self.assertEqual(audio_type, r['Audio Type'])
        audio_file = r['Audio File']
        self.assertTrue(audio_file.endswith(extension))
        # Use file(1) to confirm the bytes on disk are really in the claimed format.
        o = subprocess.check_output(['file', audio_file], stderr=subprocess.STDOUT)
        m = re.search(magic, o.decode('utf-8'), flags=re.MULTILINE)
        self.assertIsNotNone(m)

    def test_plain_text_to_wav_via_polly_node(self):
        node = rclpy.create_node('integtest')
        request = Polly.Request(polly_action='SynthesizeSpeech', text=self.TEST_TEXT)
        res = self._call_service(node, Polly, 'polly', request)
        self._check_audio_result(res, Polly.Response)
        node.destroy_node()

    def test_plain_text_using_polly_class(self):
        # Drives the AmazonPolly wrapper directly, without going through ROS.
        from tts.services.amazonpolly import AmazonPolly
        polly = AmazonPolly()
        res = polly.synthesize(text=self.TEST_TEXT)
        self._check_audio_result(res, Polly.Response)

    def test_plain_text_via_synthesizer_node(self):
        node = rclpy.create_node('integtest')
        request = Synthesizer.Request(text=self.TEST_TEXT)
        res = self._call_service(node, Synthesizer, 'synthesizer', request)
        self._check_audio_result(res, Synthesizer.Response)
        node.destroy_node()

    def test_plain_text_to_mp3_via_polly_node(self):
        node = rclpy.create_node('integtest')
        request = Polly.Request(polly_action='SynthesizeSpeech', text=self.TEST_TEXT,
                                output_format='mp3')
        res = self._call_service(node, Polly, 'polly', request)
        self._check_audio_result(res, Polly.Response, audio_type='audio/mpeg',
                                 extension='.mp3', magic=r'.*MPEG.*layer III.*')
        node.destroy_node()

    def test_simple_ssml_via_polly_node(self):
        node = rclpy.create_node('integtest')
        request = Polly.Request(polly_action='SynthesizeSpeech', text=self.SSML_TEXT,
                                text_type='ssml')
        res = self._call_service(node, Polly, 'polly', request)
        self._check_audio_result(res, Polly.Response)
        node.destroy_node()

    def test_simple_ssml_via_synthesizer_node(self):
        node = rclpy.create_node('integtest')
        request = Synthesizer.Request(text=self.SSML_TEXT, metadata='''{"text_type":"ssml"}''')
        res = self._call_service(node, Synthesizer, 'synthesizer', request)
        self._check_audio_result(res, Synthesizer.Response)
        node.destroy_node()
# Runs only after launch_testing has shut the launched processes down.
@launch_testing.post_shutdown_test()
class TestNodesStatusAfterShutdown(unittest.TestCase):
    def test_processes_finished_gracefully(self, proc_info):
        """Test that both executables finished gracefully."""
        # Asserts every launched process exited with an allowed exit code.
        launch_testing.asserts.assertExitCodes(proc_info)
| 38.138158
| 141
| 0.642229
| 1,425
| 11,594
| 5.117895
| 0.146667
| 0.037022
| 0.032086
| 0.049362
| 0.814891
| 0.805019
| 0.795695
| 0.77115
| 0.77115
| 0.77115
| 0
| 0.007075
| 0.244178
| 11,594
| 303
| 142
| 38.264026
| 0.825174
| 0.052613
| 0
| 0.80531
| 0
| 0
| 0.222962
| 0
| 0
| 0
| 0
| 0
| 0.243363
| 1
| 0.044248
| false
| 0
| 0.097345
| 0
| 0.154867
| 0.022124
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
614c699db48515e272c53839f15d9740180db5ea
| 33,205
|
py
|
Python
|
tests/test_abstract.py
|
jpivarski/doremi
|
0f8fb1fc8e9664b2e4b61fffc5382e41d8d624d6
|
[
"BSD-3-Clause"
] | 1
|
2022-01-09T00:32:44.000Z
|
2022-01-09T00:32:44.000Z
|
tests/test_abstract.py
|
jpivarski/doremi
|
0f8fb1fc8e9664b2e4b61fffc5382e41d8d624d6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_abstract.py
|
jpivarski/doremi
|
0f8fb1fc8e9664b2e4b61fffc5382e41d8d624d6
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/jpivarski/doremi/blob/main/LICENSE
from fractions import Fraction
import pytest
from lark.tree import Tree
from lark.lexer import Token
from doremi.abstract import (
AbstractNote,
Scope,
Word,
Call,
AugmentStep,
AugmentDegree,
AugmentRatio,
Duration,
Modified,
Line,
Assignment,
NamedPassage,
UnnamedPassage,
evaluate,
Collection,
abstracttree,
SymbolAllUnderscores,
MismatchingArguments,
RecursiveFunction,
)
def test_decorations():
    """Each decoration marker maps onto the matching Modified field.

    From the cases below: '!' fills the first counter, '@' the second,
    apostrophes/commas the third (signed), +/-/>/</% the augmentation slot,
    '...'/':' the duration slot, and '* n' the repetition count.
    """

    def expected(word, n_bang, n_at, n_tick, augment, duration, repetition):
        # A lone decorated word parses to a one-note unnamed passage.
        return Collection(
            [
                UnnamedPassage(
                    [
                        Line(
                            [
                                Modified(
                                    Word(word),
                                    n_bang,
                                    n_at,
                                    n_tick,
                                    augment,
                                    duration,
                                    repetition,
                                )
                            ]
                        )
                    ]
                )
            ]
        )

    cases = [
        ("la", "la", 0, 0, 0, None, None, 1),
        ("1st", "1st", 0, 0, 0, None, None, 1),
        ("!la", "la", 1, 0, 0, None, None, 1),
        ("!!la", "la", 2, 0, 0, None, None, 1),
        ("@la", "la", 0, 1, 0, None, None, 1),
        ("@ @ la", "la", 0, 2, 0, None, None, 1),
        ("la'", "la", 0, 0, 1, None, None, 1),
        ("la''", "la", 0, 0, 2, None, None, 1),
        ("la '", "la", 0, 0, 1, None, None, 1),
        (" la '", "la", 0, 0, 1, None, None, 1),
        (" la ''", "la", 0, 0, 2, None, None, 1),
        ("la'3", "la", 0, 0, 3, None, None, 1),
        ("la '3", "la", 0, 0, 3, None, None, 1),
        (" la' 3", "la", 0, 0, 3, None, None, 1),
        (" la ' 3", "la", 0, 0, 3, None, None, 1),
        ("la'3 ", "la", 0, 0, 3, None, None, 1),
        ("la,", "la", 0, 0, -1, None, None, 1),
        ("la,,", "la", 0, 0, -2, None, None, 1),
        ("la ,", "la", 0, 0, -1, None, None, 1),
        (" la ,", "la", 0, 0, -1, None, None, 1),
        (" la ,,", "la", 0, 0, -2, None, None, 1),
        ("la,3", "la", 0, 0, -3, None, None, 1),
        ("la ,3", "la", 0, 0, -3, None, None, 1),
        (" la, 3", "la", 0, 0, -3, None, None, 1),
        (" la , 3", "la", 0, 0, -3, None, None, 1),
        (" la ,3", "la", 0, 0, -3, None, None, 1),
        ("la+", "la", 0, 0, 0, AugmentStep(1), None, 1),
        ("la ++", "la", 0, 0, 0, AugmentStep(2), None, 1),
        ("la+2", "la", 0, 0, 0, AugmentStep(2), None, 1),
        ("la-2", "la", 0, 0, 0, AugmentStep(-2), None, 1),
        ("la- 3", "la", 0, 0, 0, AugmentStep(-3), None, 1),
        ("la>", "la", 0, 0, 0, AugmentDegree(1), None, 1),
        ("la >>", "la", 0, 0, 0, AugmentDegree(2), None, 1),
        ("la>2", "la", 0, 0, 0, AugmentDegree(2), None, 1),
        ("la<2", "la", 0, 0, 0, AugmentDegree(-2), None, 1),
        ("la< 3", "la", 0, 0, 0, AugmentDegree(-3), None, 1),
        ("la%2", "la", 0, 0, 0, AugmentRatio(Fraction(2, 1)), None, 1),
        ("la%2/3", "la", 0, 0, 0, AugmentRatio(Fraction(2, 3)), None, 1),
        ("la...", "la", 0, 0, 0, None, Duration(Fraction(3, 1), False), 1),
        ("la:3", "la", 0, 0, 0, None, Duration(Fraction(3, 1), False), 1),
        ("la:3/2", "la", 0, 0, 0, None, Duration(Fraction(3, 2), False), 1),
        ("la:3 / 2", "la", 0, 0, 0, None, Duration(Fraction(3, 2), False), 1),
        ("la * 4", "la", 0, 0, 0, None, None, 1 * 4),
        ("@ la'+... * 4", "la", 0, 1, 1, AugmentStep(1), Duration(Fraction(3, 1), False), 4),
    ]
    for source_text, word, n_bang, n_at, n_tick, augment, duration, repetition in cases:
        assert abstracttree(source_text) == expected(
            word, n_bang, n_at, n_tick, augment, duration, repetition
        )
def test_call():
    """Function-call syntax accepts arguments plus the full set of decorations."""
    step_up = AugmentStep(1)
    dotted = Duration(Fraction(3, 1), False)
    ratio_32 = Duration(Fraction(3, 2), False)
    x_arg = Modified(Word("x"), 0, 0, 0, None, None, 1)
    y_arg = Modified(Word("y"), 0, 0, 0, None, None, 1)

    def expected(node):
        # Every case parses to a single-element unnamed passage.
        return Collection([UnnamedPassage([Line([node])])])

    def call_fxy(n_bang, n_at, n_tick, augment, duration, repetition):
        return Modified(
            Call(Word("f"), [x_arg, y_arg]), n_bang, n_at, n_tick, augment, duration, repetition
        )

    # A bare name — even with empty parentheses — stays a plain word, not a Call.
    assert abstracttree("f") == expected(Modified(Word("f"), 0, 0, 0, None, None, 1))
    assert abstracttree("f()") == expected(Modified(Word("f"), 0, 0, 0, None, None, 1))
    assert abstracttree("f(x)") == expected(
        Modified(Call(Word("f"), [x_arg]), 0, 0, 0, None, None, 1)
    )
    assert abstracttree("f(x y)") == expected(call_fxy(0, 0, 0, None, None, 1))
    assert abstracttree("@f(x y)") == expected(call_fxy(0, 1, 0, None, None, 1))
    assert abstracttree("f(x y)'") == expected(call_fxy(0, 0, 1, None, None, 1))
    assert abstracttree("f(x y)+") == expected(call_fxy(0, 0, 0, step_up, None, 1))
    assert abstracttree("f(x y)...") == expected(call_fxy(0, 0, 0, None, dotted, 1))
    assert abstracttree("f(x y):3/2") == expected(call_fxy(0, 0, 0, None, ratio_32, 1))
    assert abstracttree("f(x y) * 4") == expected(call_fxy(0, 0, 0, None, None, 4))
    assert abstracttree("@f(x y)'+:3/2 * 4") == expected(call_fxy(0, 1, 1, step_up, ratio_32, 4))
def test_modified():
    """Braced groups take the same decorations as single words."""
    step_up = AugmentStep(1)
    dotted = Duration(Fraction(3, 1), False)
    ratio = Duration(Fraction(3, 2), False)
    starred_ratio = Duration(Fraction(3, 2), True)
    la = Modified(Word("la"), 0, 0, 0, None, None, 1)

    def expected(n_bang, n_at, n_tick, augment, duration, repetition):
        # The decorated group parses as one Modified wrapping the three notes.
        return Collection(
            [
                UnnamedPassage(
                    [Line([Modified([la, la, la], n_bang, n_at, n_tick, augment, duration, repetition)])]
                )
            ]
        )

    assert abstracttree("{la la la}") == expected(0, 0, 0, None, None, 1)
    assert abstracttree("@{la la la}") == expected(0, 1, 0, None, None, 1)
    assert abstracttree("{la la la}'") == expected(0, 0, 1, None, None, 1)
    assert abstracttree("{la la la}+") == expected(0, 0, 0, step_up, None, 1)
    assert abstracttree("{la la la}...") == expected(0, 0, 0, None, dotted, 1)
    assert abstracttree("{la la la}:3/2") == expected(0, 0, 0, None, ratio, 1)
    assert abstracttree("{la la la} : 3/2") == expected(0, 0, 0, None, ratio, 1)
    # ':*' flips the Duration flag relative to plain ':'
    assert abstracttree("{la la la}:*3/2") == expected(0, 0, 0, None, starred_ratio, 1)
    assert abstracttree("{la la la} :* 3/2") == expected(0, 0, 0, None, starred_ratio, 1)
    assert abstracttree("{la la la} * 4") == expected(0, 0, 0, None, None, 4)
    assert abstracttree("@{la la la}'+:3/2 * 4") == expected(0, 1, 1, step_up, ratio, 4)
    assert abstracttree("@{la la la}'+:*3/2 * 4") == expected(0, 1, 1, step_up, starred_ratio, 4)
def test_passage():
    """Lines group into passages: one or more blank lines split passages,
    ``name = ...`` / ``name(args) = ...`` produce NamedPassage with an
    Assignment header, and an all-underscore name raises SymbolAllUnderscores."""
    do = Modified(Word("do"), 0, 0, 0, None, None, 1)
    la = Modified(Word("la"), 0, 0, 0, None, None, 1)
    assert abstracttree("do") == Collection([UnnamedPassage([Line([do])])])
    assert abstracttree("do\nla") == Collection(
        [UnnamedPassage([Line([do]), Line([la])])]
    )
    assert abstracttree("do do do\nla") == Collection(
        [UnnamedPassage([Line([do, do, do]), Line([la])])]
    )
    assert abstracttree("do do do\nla la la") == Collection(
        [UnnamedPassage([Line([do, do, do]), Line([la, la, la])])]
    )
    assert abstracttree("do\nla\ndo\nla") == Collection(
        [UnnamedPassage([Line([do]), Line([la]), Line([do]), Line([la])])]
    )
    # One or more blank lines start a new passage; extra blanks are collapsed.
    assert abstracttree("do\n\nla") == Collection(
        [UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])]
    )
    assert abstracttree("do\n\n\nla") == Collection(
        [UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])]
    )
    assert abstracttree("do\n\nla\ndo") == Collection(
        [UnnamedPassage([Line([do])]), UnnamedPassage([Line([la]), Line([do])])]
    )
    assert abstracttree("do\n\n\nla\ndo") == Collection(
        [UnnamedPassage([Line([do])]), UnnamedPassage([Line([la]), Line([do])])]
    )
    assert abstracttree("do\n\nla\n\ndo") == Collection(
        [
            UnnamedPassage([Line([do])]),
            UnnamedPassage([Line([la])]),
            UnnamedPassage([Line([do])]),
        ]
    )
    assert abstracttree("do\n\n\nla\n\n\ndo") == Collection(
        [
            UnnamedPassage([Line([do])]),
            UnnamedPassage([Line([la])]),
            UnnamedPassage([Line([do])]),
        ]
    )
    # Assignments: with no, one, or several parameters.
    assert abstracttree("f = do") == Collection(
        [NamedPassage(Assignment(Word("f"), []), [Line([do])])]
    )
    assert abstracttree("f(x) = do") == Collection(
        [NamedPassage(Assignment(Word("f"), [Word("x")]), [Line([do])])]
    )
    assert abstracttree("f(x y) = do") == Collection(
        [NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do])])]
    )
    assert abstracttree("f(x y) = do la") == Collection(
        [NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do, la])])]
    )
    assert abstracttree("f(x y) = do\nla") == Collection(
        [
            NamedPassage(
                Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do]), Line([la])]
            )
        ]
    )
    assert abstracttree("f(x y) =\ndo\nla") == Collection(
        [
            NamedPassage(
                Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do]), Line([la])]
            )
        ]
    )
    # A blank line still terminates the named passage's body.
    assert abstracttree("f(x y) =\ndo\n\nla") == Collection(
        [
            NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do])]),
            UnnamedPassage([Line([la])]),
        ]
    )
    with pytest.raises(SymbolAllUnderscores):
        abstracttree("_ = do")
    with pytest.raises(SymbolAllUnderscores):
        abstracttree("___ = do")
def test_comments():
    """``|`` starts a comment running to end of line.  ``.comments`` collects
    one entry per source line: the comment text (with trailing newline when
    present) or a bare ``"\\n"`` for comment-less non-final lines.  Comments do
    not affect the parsed Collection structure."""
    do = Modified(Word("do"), 0, 0, 0, None, None, 1)
    la = Modified(Word("la"), 0, 0, 0, None, None, 1)
    assert abstracttree("""do""").comments == []
    assert (
        abstracttree(
            """do
"""
        ).comments
        == ["\n"]
    )
    assert abstracttree("""do | one""").comments == ["| one"]
    assert (
        abstracttree(
            """do | one
"""
        ).comments
        == ["| one\n"]
    )
    assert abstracttree("""do |one""").comments == ["|one"]
    assert (
        abstracttree(
            """do |one
"""
        ).comments
        == ["|one\n"]
    )
    assert (
        abstracttree(
            """do
la"""
        ).comments
        == ["\n"]
    )
    assert (
        abstracttree(
            """do
la
"""
        ).comments
        == ["\n", "\n"]
    )
    assert (
        abstracttree(
            """do
la"""
        ).comments
        == ["\n"]
    )
    assert (
        abstracttree(
            """do
la
"""
        ).comments
        == ["\n", "\n"]
    )
    assert (
        abstracttree(
            """do | one
la"""
        ).comments
        == ["| one\n"]
    )
    assert (
        abstracttree(
            """do | one
la
"""
        ).comments
        == ["| one\n", "\n"]
    )
    assert (
        abstracttree(
            """do | one
la | two"""
        ).comments
        == ["| one\n", "| two"]
    )
    assert (
        abstracttree(
            """do | one
la | two
"""
        ).comments
        == ["| one\n", "| two\n"]
    )
    # Comments leave the parsed structure unchanged.
    assert (
        abstracttree(
            """do | one
la | two"""
        )
        == Collection([UnnamedPassage([Line([do]), Line([la])])])
    )
    assert (
        abstracttree(
            """do | one
la | two
"""
        )
        == Collection([UnnamedPassage([Line([do]), Line([la])])])
    )
    assert (
        abstracttree(
            """do
la | two"""
        ).comments
        == ["\n", "| two"]
    )
    assert (
        abstracttree(
            """do
la | two
"""
        ).comments
        == ["\n", "| two\n"]
    )
    assert (
        abstracttree(
            """do
la | two"""
        ).comments
        == ["\n", "| two"]
    )
    assert (
        abstracttree(
            """do
la | two
"""
        ).comments
        == ["\n", "| two\n"]
    )
    # A comment-only line both records its comment and separates passages.
    assert (
        abstracttree(
            """do
| two
la | three"""
        ).comments
        == ["\n", "| two\n", "| three"]
    )
    assert (
        abstracttree(
            """do
| two
la | three
"""
        ).comments
        == ["\n", "| two\n", "| three\n"]
    )
    assert (
        abstracttree(
            """do
| two
la | three"""
        )
        == Collection([UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])])
    )
    assert (
        abstracttree(
            """do
| two
la | three
"""
        )
        == Collection([UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])])
    )
    # Comments on named-passage (assignment) lines.
    assert abstracttree("""f = do | one""").comments == ["| one"]
    assert (
        abstracttree(
            """f = do | one
"""
        ).comments
        == ["| one\n"]
    )
    assert (
        abstracttree(
            """f =
do | two"""
        ).comments
        == ["\n", "| two"]
    )
    assert (
        abstracttree(
            """f =
do | two
"""
        ).comments
        == ["\n", "| two\n"]
    )
    assert (
        abstracttree(
            """f = | one
do | two"""
        ).comments
        == ["| one\n", "| two"]
    )
    assert (
        abstracttree(
            """f = | one
do | two
"""
        ).comments
        == ["| one\n", "| two\n"]
    )
    assert (
        abstracttree(
            """| one
f =
do | three"""
        ).comments
        == ["| one\n", "\n", "| three"]
    )
    assert (
        abstracttree(
            """| one
f =
do | three
"""
        ).comments
        == ["| one\n", "\n", "| three\n"]
    )
    assert (
        abstracttree(
            """| one
f = | two
do | three"""
        ).comments
        == ["| one\n", "| two\n", "| three"]
    )
    assert (
        abstracttree(
            """| one
f = | two
do | three
"""
        ).comments
        == ["| one\n", "| two\n", "| three\n"]
    )
def test_evaluate():
    """evaluate(passage, scope, ...) yields (total_duration, [AbstractNote...]).
    Words become unit-length notes laid end to end; ``_`` is a rest; ``.``/``:``
    stretch durations; ``'`` sets octave; ``+n``/``>n`` add augmentations;
    newlines stack lines in parallel; ``* n`` repeats; ``@`` exempts an item
    from a group-level modifier."""
    assert evaluate(abstracttree("do").passages[0], Scope({}), 0, 0, (), ()) == (
        1.0,
        [AbstractNote(0.0, 1.0, Word("do"))],
    )
    assert evaluate(abstracttree("do re mi").passages[0], Scope({}), 0, 0, (), ()) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
        ],
    )
    # Each trailing dot adds one unit of duration.
    assert evaluate(abstracttree("do....").passages[0], Scope({}), 0, 0, (), ()) == (
        4.0,
        [AbstractNote(0.0, 4.0, Word("do"))],
    )
    assert evaluate(
        abstracttree("do.. re.. mi..").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
        ],
    )
    # Underscores are rests: they consume time but emit no notes.
    assert evaluate(abstracttree("___").passages[0], Scope({}), 0, 0, (), ()) == (
        3.0,
        [],
    )
    assert evaluate(abstracttree("do _ mi").passages[0], Scope({}), 0, 0, (), ()) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(2.0, 3.0, Word("mi")),
        ],
    )
    assert evaluate(abstracttree("do __ mi").passages[0], Scope({}), 0, 0, (), ()) == (
        4.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(3.0, 4.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("do __ mi _").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        5.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(3.0, 4.0, Word("mi")),
        ],
    )
    # Separate lines play simultaneously; total duration is the longest line.
    assert evaluate(
        abstracttree("do\nre\nmi").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        1.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(0.0, 1.0, Word("re")),
            AbstractNote(0.0, 1.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("do\n_\nre mi").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        2.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(0.0, 1.0, Word("re")),
            AbstractNote(1.0, 2.0, Word("mi")),
        ],
    )
    assert evaluate(abstracttree("do'").passages[0], Scope({}), 0, 0, (), ()) == (
        1.0,
        [AbstractNote(0.0, 1.0, Word("do"), octave=1)],
    )
    assert evaluate(abstracttree("do+1").passages[0], Scope({}), 0, 0, (), ()) == (
        1.0,
        [AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),))],
    )
    assert evaluate(
        abstracttree("{do re mi}").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
        ],
    )
    # Group-level modifiers distribute over every member of the group.
    assert evaluate(
        abstracttree("{do re mi}'").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), octave=1),
            AbstractNote(1.0, 2.0, Word("re"), octave=1),
            AbstractNote(2.0, 3.0, Word("mi"), octave=1),
        ],
    )
    # Note: `@` does NOT exempt a member from a group-level octave shift.
    assert evaluate(
        abstracttree("{do @re mi}'").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), octave=1),
            AbstractNote(1.0, 2.0, Word("re"), octave=1),
            AbstractNote(2.0, 3.0, Word("mi"), octave=1),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}+1").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),)),
            AbstractNote(1.0, 2.0, Word("re"), augmentations=(AugmentStep(1),)),
            AbstractNote(2.0, 3.0, Word("mi"), augmentations=(AugmentStep(1),)),
        ],
    )
    # ...but `@` does exempt a member from a group-level augmentation.
    assert evaluate(
        abstracttree("{do @re mi}+1").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),)),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi"), augmentations=(AugmentStep(1),)),
        ],
    )
    assert evaluate(
        abstracttree("{{do @re mi}+1}>2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(
                0.0, 1.0, Word("do"), augmentations=(AugmentDegree(2), AugmentStep(1))
            ),
            AbstractNote(1.0, 2.0, Word("re"), augmentations=(AugmentDegree(2),)),
            AbstractNote(
                2.0, 3.0, Word("mi"), augmentations=(AugmentDegree(2), AugmentStep(1))
            ),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}:6").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}:*2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi} fa").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        4.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
            AbstractNote(3.0, 4.0, Word("fa")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}:6 fa").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        7.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
            AbstractNote(6.0, 7.0, Word("fa")),
        ],
    )
    assert evaluate(abstracttree("do * 2").passages[0], Scope({}), 0, 0, (), ()) == (
        2.0,
        [AbstractNote(0.0, 1.0, Word("do")), AbstractNote(1.0, 2.0, Word("do"))],
    )
    # Bare `* n` binds to the preceding item only, not the whole line.
    assert evaluate(
        abstracttree("do re mi * 2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        4.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
            AbstractNote(3.0, 4.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi} * 2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
            AbstractNote(3.0, 4.0, Word("do")),
            AbstractNote(4.0, 5.0, Word("re")),
            AbstractNote(5.0, 6.0, Word("mi")),
        ],
    )
def test_evaluate_assign():
    """Named passages act as functions in the Scope: calls substitute argument
    groups for parameters, arity mismatches raise MismatchingArguments, and
    direct or mutual recursion raises RecursiveFunction."""
    definition = abstracttree("f(x y) = y x").passages[0]
    assert evaluate(
        abstracttree("do f(mi re) fa so").passages[0],
        Scope({"f": definition}),
        0,
        0,
        (),
        (),
    ) == (
        5.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
            AbstractNote(3.0, 4.0, Word("fa")),
            AbstractNote(4.0, 5.0, Word("so")),
        ],
    )
    # Arguments may themselves be groups; they are spliced in whole.
    assert evaluate(
        abstracttree("do f({mi mi} {re re}) fa so").passages[0],
        Scope({"f": definition}),
        0,
        0,
        (),
        (),
    ) == (
        7.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("re")),
            AbstractNote(3.0, 4.0, Word("mi")),
            AbstractNote(4.0, 5.0, Word("mi")),
            AbstractNote(5.0, 6.0, Word("fa")),
            AbstractNote(6.0, 7.0, Word("so")),
        ],
    )
    with pytest.raises(MismatchingArguments):
        evaluate(
            abstracttree("f(mi)").passages[0], Scope({"f": definition}), 0, 0, (), ()
        )
    with pytest.raises(MismatchingArguments):
        evaluate(
            abstracttree("f(la la la)").passages[0],
            Scope({"f": definition}),
            0,
            0,
            (),
            (),
        )
    # A zero-parameter definition expands in place; its lines play in parallel.
    definition = abstracttree("f = do\nmi\nso").passages[0]
    assert evaluate(
        abstracttree("la f la").passages[0], Scope({"f": definition}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("la")),
            AbstractNote(1.0, 2.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("mi")),
            AbstractNote(1.0, 2.0, Word("so")),
            AbstractNote(2.0, 3.0, Word("la")),
        ],
    )
    with pytest.raises(MismatchingArguments):
        evaluate(
            abstracttree("f(mi)").passages[0], Scope({"f": definition}), 0, 0, (), ()
        )
    definition1 = abstracttree("f = do do").passages[0]
    definition2 = abstracttree("g(x) = f x").passages[0]
    assert evaluate(
        abstracttree("g(mi)").passages[0],
        Scope({"f": definition1, "g": definition2}),
        0,
        0,
        (),
        (),
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("do")),
            AbstractNote(2.0, 3.0, Word("mi")),
        ],
    )
    # Mutual recursion (f -> g -> f) is rejected...
    definition1 = abstracttree("f = g(la)").passages[0]
    definition2 = abstracttree("g(x) = f x").passages[0]
    with pytest.raises(RecursiveFunction):
        evaluate(
            abstracttree("g(mi)").passages[0],
            Scope({"f": definition1, "g": definition2}),
            0,
            0,
            (),
            (),
        )
    # ...as is direct self-reference.
    definition2 = abstracttree("g = do g mi").passages[0]
    with pytest.raises(RecursiveFunction):
        evaluate(
            abstracttree("la g").passages[0],
            Scope({"g": definition2}),
            0,
            0,
            (),
            (),
        )
def test_evaluate_midlevel():
    """Collection.evaluate(None) runs a whole source text: definitions are
    collected into the returned Scope, and the remaining passages evaluate to
    (duration, notes)."""
    assert abstracttree(
        """
f(x y) = y x
do f({mi mi} {re re}) fa
"""
    ).evaluate(None) == (
        6.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("re")),
            AbstractNote(3.0, 4.0, Word("mi")),
            AbstractNote(4.0, 5.0, Word("mi")),
            AbstractNote(5.0, 6.0, Word("fa")),
        ],
        Scope(
            {
                "f": NamedPassage(
                    Assignment(Word("f"), [Word("x"), Word("y")]),
                    [
                        Line(
                            [
                                Modified(Word(val="y"), 0, 0, 0, None, None, 1),
                                Modified(Word("x"), 0, 0, 0, None, None, 1),
                            ]
                        )
                    ],
                )
            }
        ),
    )
| 27.973884
| 87
| 0.41819
| 3,216
| 33,205
| 4.310323
| 0.028607
| 0.025682
| 0.165633
| 0.174001
| 0.93493
| 0.922306
| 0.881042
| 0.854855
| 0.810056
| 0.790434
| 0
| 0.049346
| 0.3897
| 33,205
| 1,186
| 88
| 27.99747
| 0.63469
| 0.002379
| 0
| 0.492203
| 0
| 0
| 0.053742
| 0
| 0
| 0
| 0
| 0
| 0.145224
| 1
| 0.007797
| false
| 0.135478
| 0.004873
| 0
| 0.012671
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
61503efa08bdc454ecd1d0a372f3de46d986c4f2
| 41,423
|
py
|
Python
|
python/experiments/lnpdfs/create_target_lnpfs.py
|
DrawZeroPoint/VIPS
|
730f4e18c24afa6f561b13d1fe8af53ae89990a7
|
[
"MIT"
] | 12
|
2018-07-11T14:35:51.000Z
|
2020-12-07T03:54:28.000Z
|
python/experiments/lnpdfs/create_target_lnpfs.py
|
DrawZeroPoint/VIPS
|
730f4e18c24afa6f561b13d1fe8af53ae89990a7
|
[
"MIT"
] | null | null | null |
python/experiments/lnpdfs/create_target_lnpfs.py
|
DrawZeroPoint/VIPS
|
730f4e18c24afa6f561b13d1fe8af53ae89990a7
|
[
"MIT"
] | 10
|
2018-07-11T14:36:00.000Z
|
2022-01-14T21:41:41.000Z
|
import numpy as np
from experiments.GMM import GMM
from scipy.stats import multivariate_normal as normal_pdf
import os
# Resolve the repository data directory relative to this file:
# .../python/experiments/lnpdfs -> three levels up, then "/data/".
file_path = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.abspath(os.path.join(file_path, os.pardir, os.pardir, os.pardir)) + "/data/"
### Gaussian Mixture Model experiment
def build_GMM_lnpdf(num_dimensions, num_true_components, prior_variance=1e3):
    """Build a randomly drawn GMM target log-density.

    Returns [target_lnpdf, prior, prior_chol, target_mixture], where
    target_lnpdf(theta, without_prior=False) evaluates the mixture log-density
    and counts evaluations in target_lnpdf.counter.
    NOTE(review): uses the unseeded global np.random state, so the target
    differs between calls unless the caller seeds it.
    """
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
    target_mixture = GMM(num_dimensions)
    for i in range(0, num_true_components):
        # Random SPD covariance: A'A is positive semi-definite, the added
        # identity keeps it well conditioned.
        this_cov = 0.1 * np.random.normal(0, num_dimensions, (num_dimensions * num_dimensions)).reshape(
            (num_dimensions, num_dimensions))
        this_cov = this_cov.transpose().dot(this_cov)
        this_cov += 1 * np.eye(num_dimensions)
        this_mean = 100 * (np.random.random(num_dimensions) - 0.5)
        target_mixture.add_component(this_mean, this_cov)
    target_mixture.set_weights(np.ones(num_true_components) / num_true_components)
    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)  # one count per sample row
        if without_prior:
            # NOTE(review): subtracts prior.logpdf here although the GMM call
            # above does not visibly add it — confirm against GMM.evaluate.
            return np.squeeze(target_mixture.evaluate(theta, return_log=True) - prior.logpdf(theta))
        else:
            return np.squeeze(target_mixture.evaluate(theta, return_log=True))
    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol, target_mixture]
def build_GMM_lnpdf_autograd(num_dimensions, num_true_components):
    """Autograd-differentiable variant of build_GMM_lnpdf.

    Keeps raw means/covs arrays (no GMM object) so the returned target_lnpdf
    can be traced by autograd.  Returns [target_lnpdf, means, covs].
    """
    import autograd.scipy.stats.multivariate_normal as normal_auto
    from autograd.scipy.misc import logsumexp
    import autograd.numpy as np  # shadows module-level numpy inside this function
    means = np.empty((num_true_components, num_dimensions))
    covs = np.empty((num_true_components, num_dimensions, num_dimensions))
    for i in range(0, num_true_components):
        # Same random SPD construction as build_GMM_lnpdf.
        covs[i] = 0.1 * np.random.normal(0, num_dimensions, (num_dimensions * num_dimensions)).reshape(
            (num_dimensions, num_dimensions))
        covs[i] = covs[i].transpose().dot(covs[i])
        covs[i] += 1 * np.eye(num_dimensions)
        means[i] = 100 * (np.random.random(num_dimensions) - 0.5)
    def target_lnpdf(theta):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        cluster_lls = []
        for i in range(0, num_true_components):
            cluster_lls.append(np.log(1./num_true_components) + normal_auto.logpdf(theta, means[i], covs[i]))
        # log sum_i w_i N(theta; mu_i, cov_i), computed stably via logsumexp.
        return np.squeeze(logsumexp(np.vstack(cluster_lls), axis=0))
    target_lnpdf.counter = 0
    return [target_lnpdf, means, covs]
### Planar-N-Link experiment
def build_target_likelihood_planar_n_link(num_dimensions, prior_variance, likelihood_variance):
    """Posterior over the joint angles of a planar robot arm with unit links.

    The likelihood is a Gaussian at (0.7 * num_dimensions, 0) evaluated at the
    arm's end-effector position; the prior is a zero-mean isotropic Gaussian
    over the joint angles.  Returns [target_lnpdf, prior, prior_chol]; the
    returned target_lnpdf counts evaluations in target_lnpdf.counter.
    """
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
    likelihood = normal_pdf([0.7 * num_dimensions, 0], likelihood_variance * np.eye(2))
    link_lengths = np.ones(num_dimensions)

    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        # Forward kinematics: each joint angle accumulates along the chain.
        angles = np.cumsum(theta, axis=1)
        tip_x = np.sum(link_lengths * np.cos(angles), axis=1)
        tip_y = np.sum(link_lengths * np.sin(angles), axis=1)
        cart_ll = likelihood.logpdf(np.column_stack((tip_x, tip_y)))
        if without_prior:
            return np.squeeze(cart_ll)
        return np.squeeze(prior.logpdf(theta) + cart_ll)

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol]
def build_target_likelihood_planar_n_link_4(num_dimensions, prior_variance, likelihood_variance):
    """Planar-arm posterior with four symmetric end-effector goals.

    The likelihood of a joint configuration is the max over four Gaussians
    centred at (+/-0.7*D, 0) and (0, +/-0.7*D), evaluated at the arm tip.
    Returns [target_lnpdf, prior, prior_chol].
    """
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
    likelihood1 = normal_pdf([0.7 * num_dimensions, 0], likelihood_variance * np.eye(2))
    likelihood2 = normal_pdf([-0.7 * num_dimensions, 0], likelihood_variance * np.eye(2))
    likelihood3 = normal_pdf([0, 0.7 * num_dimensions], likelihood_variance * np.eye(2))
    likelihood4 = normal_pdf([0, -0.7 * num_dimensions], likelihood_variance * np.eye(2))
    l = np.ones(num_dimensions)  # unit link lengths
    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        y = np.zeros((len(theta)))
        x = np.zeros((len(theta)))
        # Forward kinematics: joint angles accumulate along the chain.
        for i in range(0, num_dimensions):
            y += l[i] * np.sin(np.sum(theta[:,:i+1],1))
            x += l[i] * np.cos(np.sum(theta[:,:i+1],1))
        # likelihood = likelihood2.logpdf(np.vstack((x,y)).transpose())
        likelihood = np.max(np.vstack((likelihood1.logpdf(np.vstack((x,y)).transpose()), likelihood2.logpdf(np.vstack((x,y)).transpose()), likelihood3.logpdf(np.vstack((x,y)).transpose()), likelihood4.logpdf(np.vstack((x,y)).transpose()))),axis=0)
        if without_prior:
            return np.squeeze(likelihood)
        else:
            return np.squeeze(prior.logpdf(theta) + likelihood)
    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol]
def build_target_likelihood_planar_n_link_4_autograd(num_dimensions, prior_variance, likelihood_variance):
    """Autograd-differentiable variant of build_target_likelihood_planar_n_link_4.

    The four goal Gaussians are evaluated inline (no frozen scipy objects) so
    the whole expression is traceable.  Returns the bare target_lnpdf.
    """
    from autograd.scipy.stats import multivariate_normal as normal_auto
    import autograd.numpy as np  # shadows module-level numpy inside this function
    l = np.ones(num_dimensions)  # unit link lengths
    def target_lnpdf(theta):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        y = np.zeros((len(theta)))
        x = np.zeros((len(theta)))
        for i in range(0, num_dimensions):
            y += l[i] * np.sin(np.sum(theta[:,:i+1],1))
            x += l[i] * np.cos(np.sum(theta[:,:i+1],1))
        # Max over the four symmetric goal likelihoods.
        likelihood = np.max((
            normal_auto.logpdf(np.vstack((x,y)).transpose(),[0.7 * num_dimensions, 0], likelihood_variance * np.eye(2)),
            normal_auto.logpdf(np.vstack((x,y)).transpose(),[-0.7 * num_dimensions, 0], likelihood_variance * np.eye(2)),
            normal_auto.logpdf(np.vstack((x,y)).transpose(),[0, 0.7 * num_dimensions], likelihood_variance * np.eye(2)),
            normal_auto.logpdf(np.vstack((x,y)).transpose(),[0, -0.7 * num_dimensions], likelihood_variance * np.eye(2))))
        return np.squeeze(normal_auto.logpdf(theta, np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions)) + likelihood)
    target_lnpdf.counter = 0
    return target_lnpdf
def build_target_likelihood_planar_n_link_3(num_dimensions, prior_variance, likelihood_variance):
    """Planar-arm posterior with three fixed end-effector goals.

    The likelihood of a joint configuration is the max over three Gaussians
    centred at (4, 7), (5, 3) and (6, -2), evaluated at the arm tip.
    Returns [target_lnpdf, prior, prior_chol]; the returned target_lnpdf
    counts evaluations in target_lnpdf.counter.
    """
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
    goals = [
        normal_pdf(center, likelihood_variance * np.eye(2))
        for center in ([4, 7], [5, 3], [6, -2])
    ]
    link_lengths = np.ones(num_dimensions)

    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        # Forward kinematics: joint angles accumulate along the chain.
        angles = np.cumsum(theta, axis=1)
        tip = np.column_stack((
            np.sum(link_lengths * np.cos(angles), axis=1),
            np.sum(link_lengths * np.sin(angles), axis=1),
        ))
        # Best (max) goal log-likelihood per sample.
        best = np.max(np.vstack([g.logpdf(tip) for g in goals]), axis=0)
        if without_prior:
            return np.squeeze(best)
        return np.squeeze(prior.logpdf(theta) + best)

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol]
def build_target_likelihood_planar_n_link_4_2(num_dimensions, prior_variance, likelihood_variance):
    """Planar-arm posterior with four fixed end-effector goals.

    Like build_target_likelihood_planar_n_link_3 but with a fourth goal at
    (4, -6).  Returns [target_lnpdf, prior, prior_chol].
    """
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
    likelihood1 = normal_pdf([4,7], likelihood_variance * np.eye(2))
    likelihood2 = normal_pdf([5,3], likelihood_variance * np.eye(2))
    likelihood3 = normal_pdf([6,-2], likelihood_variance * np.eye(2))
    likelihood4 = normal_pdf([4,-6], likelihood_variance * np.eye(2))
    l = np.ones(num_dimensions)  # unit link lengths
    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        y = np.zeros((len(theta)))
        x = np.zeros((len(theta)))
        # Forward kinematics: joint angles accumulate along the chain.
        for i in range(0, num_dimensions):
            y += l[i] * np.sin(np.sum(theta[:,:i+1],1))
            x += l[i] * np.cos(np.sum(theta[:,:i+1],1))
        likelihood = np.max(np.vstack((likelihood1.logpdf(np.vstack((x,y)).transpose()), likelihood2.logpdf(np.vstack((x,y)).transpose()), likelihood3.logpdf(np.vstack((x,y)).transpose()), likelihood4.logpdf(np.vstack((x,y)).transpose()))),axis=0)
        if without_prior:
            return np.squeeze(likelihood)
        else:
            return np.squeeze(prior.logpdf(theta) + likelihood)
    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol]
def build_target_likelihood_planar_autograd(num_dimensions):
    """Autograd-differentiable planar-arm posterior with fixed variances.

    Prior variances are 4e-2 per joint (1 for the first joint); the Cartesian
    goal at (0.7 * num_dimensions, 0) has variance 1e-4 per axis.
    Returns [target_lnpdf, num_dimensions, None] (no Cholesky is provided).
    """
    from autograd.scipy.stats import multivariate_normal as normal_auto
    import autograd.numpy as np  # shadows module-level numpy inside this function
    conf_likelihood_var = 4e-2 * np.ones(num_dimensions)
    conf_likelihood_var[0] = 1
    cart_likelihood_var = np.array([1e-4, 1e-4])
    prior_mean = np.zeros(num_dimensions)
    # NOTE(review): the configuration-space variances are used as the PRIOR
    # covariance here despite the "likelihood" name — confirm intent.
    prior_cov = conf_likelihood_var * np.eye(num_dimensions)
    likelihood_mean = [0.7 * num_dimensions, 0]
    likelihood_cov = cart_likelihood_var * np.eye(2)
    l = np.ones(num_dimensions)  # unit link lengths
    def target_lnpdf(theta):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        y = np.zeros((len(theta)))
        x = np.zeros((len(theta)))
        for i in range(0, num_dimensions):
            y += l[i] * np.sin(np.sum(theta[:,:i + 1],1))
            x += l[i] * np.cos(np.sum(theta[:,:i + 1],1))
        return normal_auto.logpdf(theta, prior_mean, prior_cov) + normal_auto.logpdf(np.vstack([x, y]).transpose(),
                                                                                     likelihood_mean, likelihood_cov)
    target_lnpdf.counter = 0
    return [target_lnpdf, num_dimensions, None]
### Logistic regression experiments
def build_logist_regression_autograd(X, y, prior_variance):
    """Autograd-differentiable Bayesian logistic-regression log-posterior.

    X: (N, D) design matrix, y: (N,) labels in {0, 1}, prior_variance:
    isotropic Gaussian prior variance on the weights.  Returns the bare
    target_lnpdf(theta, without_prior=False).
    """
    import autograd.numpy as np  # shadows module-level numpy inside this function
    import autograd.scipy.stats.multivariate_normal as normal_auto
    num_dimensions = X.shape[1]
    prior_mean = np.zeros(num_dimensions)
    prior_cov = prior_variance * np.eye(num_dimensions)
    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        weighted_sum = np.dot(theta, X.transpose())
        # Numerically stable -log(1 + exp(w*x)) via the max-offset trick.
        offset = np.maximum(weighted_sum, np.zeros(weighted_sum.shape))
        denominator = offset + np.log(np.exp(weighted_sum - offset) + np.exp(-offset))
        log_prediction = -denominator
        swapped_y = -(y - 1)  # maps labels {0, 1} -> {1, 0}
        # Add w*x for negative-class samples (non-autograd variant does the
        # same via fancy indexing on y == 0).
        log_prediction = log_prediction + swapped_y[np.newaxis, :] * (weighted_sum)
        #log_prediction[np.where(np.isinf(log_prediction))] = 0
        if (np.any(np.isnan(log_prediction)) or np.any(np.isinf(log_prediction))):
            print('nan')
        loglikelihood = np.sum(log_prediction,1)
        if without_prior:
            return np.squeeze(loglikelihood)
        else:
            return np.squeeze(normal_auto.logpdf(theta, prior_mean, prior_cov) + loglikelihood)
    target_lnpdf.counter = 0
    return target_lnpdf
def build_logist_regression(X, y, prior_variance):
    """Bayesian logistic-regression log-posterior (plain numpy, no autograd).

    Returns [target_lnpdf, prior, prior_chol].  Here `anp` is just plain
    numpy re-imported under another name; `np` refers to the module-level
    numpy, so the two are interchangeable.
    """
    import numpy as anp
    num_dimensions = X.shape[1]
    prior = normal_pdf(anp.zeros(num_dimensions), prior_variance * anp.eye(num_dimensions))
    prior_chol = anp.sqrt(prior_variance) * anp.eye(num_dimensions)
    def target_lnpdf(theta, without_prior=False):
        theta = anp.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        weighted_sum = theta.dot(X.transpose())
        # Numerically stable -log(1 + exp(w*x)) via the max-offset trick.
        offset = anp.maximum(weighted_sum, np.zeros(weighted_sum.shape))
        denominator = offset + anp.log(anp.exp(weighted_sum - offset) + anp.exp(-offset))
        log_prediction = -denominator
        # Add w*x for negative-class (y == 0) samples.
        log_prediction[:,np.where(y == 0)] += weighted_sum[:,np.where(y == 0)]
        log_prediction[np.where(anp.isinf(log_prediction))] = 0
        if (anp.any(anp.isnan(log_prediction)) or anp.any(anp.isinf(log_prediction))):
            print('nan')
        loglikelihood = anp.sum(log_prediction,1)
        if without_prior:
            return np.squeeze(loglikelihood)
        else:
            return np.squeeze(prior.logpdf(theta) + loglikelihood)
    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol]
def build_breast_cancer_lnpdf(with_autograd=False):
    """Logistic-regression log-posterior on the breast-cancer dataset.

    Loads <data_path>/datasets/breast_cancer.data, standardizes the features,
    prepends a bias column, and delegates to the (autograd or plain) builder.
    The autograd path wraps the lnpdf to add its own evaluation counter.
    """
    if with_autograd:
        import autograd.numpy as np
    else:
        import numpy as np
    data = np.loadtxt(data_path + "datasets/breast_cancer.data")
    y = data[:, 1]
    X = data[:, 2:]
    X /= np.std(X, 0)[np.newaxis, :]  # standardize each feature column
    X = np.hstack((np.ones((len(X), 1)), X))  # bias column
    prior_vars = 100
    if with_autograd:
        tmp = build_logist_regression_autograd(X, y, prior_vars)
        def lnpdf(theta):
            input = np.atleast_2d(theta)
            lnpdf.counter += len(input)
            return tmp(input)
        lnpdf.counter = 0
        return lnpdf
    return build_logist_regression(X, y, prior_vars)
def build_german_credit_lnpdf(with_autograd=False):
    """Logistic-regression log-posterior on the German-credit dataset.

    Labels are shifted from {1, 2} to {0, 1}; features are standardized and a
    bias column is prepended before delegating to the chosen builder.
    """
    data = np.loadtxt(data_path + "datasets/german.data-numeric")
    y = data[:, -1] - 1
    X = data[:, :-1]
    X /= np.std(X, 0)[np.newaxis, :]  # standardize each feature column
    X = np.hstack((np.ones((len(X), 1)), X))  # bias column
    prior_vars = 100
    if with_autograd:
        return build_logist_regression_autograd(X, y, prior_vars)
    return build_logist_regression(X, y, prior_vars)
### GP Regression Experiments
def build_GPR_lnpdf(X, y, prior_variance=1, prior_on_variance=True):
    """GP-regression hyperparameter log-posterior (gradient-free).

    theta is expected in log-space and exponentiated before being written into
    the RBF kernel (variance first when prior_on_variance, then lengthscales).
    Returns (target_lnpdf, prior_cov, chol(prior_cov)).
    """
    import GPy
    from scipy.stats import multivariate_normal as mvn
    num_dimensions = X.shape[1]
    kernel = GPy.kern.RBF(num_dimensions, lengthscale=np.ones(num_dimensions), ARD=True)
    kernel.lengthscale.set_prior(GPy.priors.Gamma.from_EV(1., 0.1))
    if prior_on_variance:
        kernel.variance.set_prior(GPy.priors.Gamma.from_EV(1., 1))
    m = GPy.models.GPRegression(X, y, kernel=kernel)
    prior_mean = np.zeros(num_dimensions+1)
    prior_cov = prior_variance * np.eye(num_dimensions+1)
    prior = mvn(prior_mean, prior_cov)
    def target_lnpdf(input, without_prior=False):
        input = np.atleast_2d(input)
        thetas = np.exp(input)  # input is in log-space
        target_lnpdf.counter += len(thetas)
        output = []
        for theta,inp in zip(thetas,input):
            if prior_on_variance:
                m.kern.variance = theta[0]
                m.kern.lengthscale = theta[1:]
            else:
                m.kern.lengthscale = theta
            if without_prior:
                # NOTE(review): GPy's objective includes the Gamma priors set
                # above; subtracting prior.logpdf(inp) appears intended to
                # remove a prior term — confirm sign/semantics against GPy.
                output.append(-m.objective_function() - prior.logpdf(inp))
            else:
                output.append(-m.objective_function())
        return np.squeeze(np.array(output))
    target_lnpdf.counter = 0
    return target_lnpdf, prior_cov, np.linalg.cholesky(prior_cov)
def build_GPR2_lnpdf(X, y, prior_variance=10, prior_on_variance=True):
    """Variant of build_GPR_lnpdf: Gamma(1., ...) priors (shape/rate form,
    not from_EV), prior dimensionality tied to prior_on_variance, and failed
    GPy evaluations are mapped to NaN instead of raising.
    Returns (target_lnpdf, prior_cov, chol(prior_cov)).
    """
    import GPy
    from scipy.stats import multivariate_normal as mvn
    num_dimensions = X.shape[1]
    kernel = GPy.kern.RBF(num_dimensions, lengthscale=np.ones(num_dimensions), ARD=True)
    kernel.lengthscale.set_prior(GPy.priors.Gamma(1., 0.1))
    if prior_on_variance:
        kernel.variance.set_prior(GPy.priors.Gamma(1., 1))
    m = GPy.models.GPRegression(X, y, kernel=kernel)
    if prior_on_variance:
        prior_mean = np.zeros(num_dimensions+1)
        prior_cov = prior_variance * np.eye(num_dimensions+1)
    else:
        prior_mean = np.zeros(num_dimensions)
        prior_cov = prior_variance * np.eye(num_dimensions)
    prior = mvn(prior_mean, prior_cov)
    def target_lnpdf(input, without_prior=False):
        input = np.atleast_2d(input)
        thetas = np.exp(input)  # input is in log-space
        target_lnpdf.counter += len(thetas)
        output = []
        for theta,inp in zip(thetas,input):
            try:
                if prior_on_variance:
                    m.kern.variance = theta[0]
                    m.kern.lengthscale = theta[1:]
                else:
                    m.kern.lengthscale = theta
                if without_prior:
                    output.append(-m.objective_function() - prior.logpdf(inp))
                else:
                    output.append(-m.objective_function())
            except:
                # NOTE(review): bare except silently maps ANY failure
                # (including KeyboardInterrupt) to NaN — consider narrowing.
                output.append(np.NaN)
        return np.squeeze(np.array(output))
    target_lnpdf.counter = 0
    return target_lnpdf, prior_cov, np.linalg.cholesky(prior_cov)
# This version does not support autograd (due to GPy), but converts autograds ArrayNodes to numpy arrays
# This version does not support autograd (due to GPy), but converts autograds ArrayNodes to numpy arrays
def build_GPR_with_grad_lnpdf_autograd(X, y, prior_on_variance=True):
    """GP-regression hyperparameter log-density with analytic gradients.

    GPy itself is not autograd-traceable, so incoming autograd ArrayNodes are
    unwrapped to plain arrays first.  theta is expected in log-space and is
    exponentiated before being written into the kernel.  Returns
    target_lnpdf(theta, without_prior=False) -> [log-density, gradient], where
    the gradient is w.r.t. log-space theta (chain rule: d/dlog t = t * d/dt).
    """
    import GPy
    num_dimensions = X.shape[1]
    kernel = GPy.kern.RBF(num_dimensions, lengthscale=np.ones(num_dimensions), ARD=True)
    kernel.lengthscale.set_prior(GPy.priors.Gamma.from_EV(1., 0.1))
    if prior_on_variance:
        kernel.variance.set_prior(GPy.priors.Gamma.from_EV(1., 1))
    m = GPy.models.GPRegression(X, y, kernel=kernel)
    import autograd
    def target_lnpdf(theta, without_prior=False):
        if isinstance(theta, autograd.numpy.numpy_extra.ArrayNode):
            theta = theta.value
        theta = np.exp(theta)
        # BUGFIX: this increment previously sat after the return statements
        # below and was unreachable, so counter stayed 0 forever.  Count the
        # call up front, consistent with the *_absolutly_no_autograd variants.
        target_lnpdf.counter += 1
        if prior_on_variance:
            m.kern.variance = theta[0]
            m.kern.lengthscale = theta[1:]
        else:
            m.kern.lengthscale = theta
        if without_prior:
            grad_dexpTheta = -m._log_likelihood_gradients()[1:-1]
            grad_dtheta = grad_dexpTheta * theta
            return [m.log_likelihood(), grad_dtheta]
        else:
            if prior_on_variance:
                grad_dexpTheta = m.objective_function_gradients()[:-1]
            else:
                grad_dexpTheta = m.objective_function_gradients()[1:-1]
            grad_dtheta = grad_dexpTheta * theta
            return [-m.objective_function(), grad_dtheta]
    target_lnpdf.counter = 0
    return target_lnpdf
def build_GPR_with_grad_lnpdf(X, y, prior_on_variance=True):
    """GP-regression hyperparameter log-density with analytic gradients.

    theta is expected in log-space and exponentiated before being written into
    the kernel.  Returns target_lnpdf(theta, without_prior=False) ->
    [log-density, gradient w.r.t. log-space theta].
    NOTE(review): reads theta.value, i.e. assumes an ArrayNode-like wrapper —
    a plain ndarray would raise AttributeError; confirm callers.
    """
    import GPy
    num_dimensions = X.shape[1]
    kernel = GPy.kern.RBF(num_dimensions, lengthscale=np.ones(num_dimensions), ARD=True)
    kernel.lengthscale.set_prior(GPy.priors.Gamma.from_EV(1., 0.1))
    if prior_on_variance:
        kernel.variance.set_prior(GPy.priors.Gamma.from_EV(1., 1))
    m = GPy.models.GPRegression(X, y, kernel=kernel)
    def target_lnpdf(theta, without_prior=False):
        theta = np.exp(theta.value)
        # BUGFIX: this increment previously sat after the return statements
        # below and was unreachable, so counter stayed 0 forever.  Count the
        # call up front, consistent with the *_absolutly_no_autograd variants.
        target_lnpdf.counter += 1
        if prior_on_variance:
            m.kern.variance = theta[0]
            m.kern.lengthscale = theta[1:]
        else:
            m.kern.lengthscale = theta
        if without_prior:
            grad_dexpTheta = -m._log_likelihood_gradients()[1:-1]
            grad_dtheta = grad_dexpTheta * theta  # chain rule for log-space input
            return [m.log_likelihood(), grad_dtheta]
        else:
            if prior_on_variance:
                grad_dexpTheta = m.objective_function_gradients()[:-1]
            else:
                grad_dexpTheta = m.objective_function_gradients()[1:-1]
            grad_dtheta = grad_dexpTheta * theta
            return [-m.objective_function(), grad_dtheta]
    target_lnpdf.counter = 0
    return target_lnpdf
def build_GPR2_with_grad_lnpdf_absolutly_no_autograd(X, y, prior_on_variance=True):
    """Plain-numpy GP-regression log-density with analytic gradients.

    Uses Gamma(1., ...) priors in shape/rate form (the non-"2" variant uses
    Gamma.from_EV).  theta is expected in log-space.  Returns
    target_lnpdf(theta, without_prior=False) -> [log-density, gradient
    w.r.t. log-space theta].
    """
    import GPy
    num_dimensions = X.shape[1]
    kernel = GPy.kern.RBF(num_dimensions, lengthscale=np.ones(num_dimensions), ARD=True)
    kernel.lengthscale.set_prior(GPy.priors.Gamma(1., 0.1))
    if prior_on_variance:
        kernel.variance.set_prior(GPy.priors.Gamma(1., 1))
    m = GPy.models.GPRegression(X, y, kernel=kernel)
    def target_lnpdf(theta, without_prior=False):
        theta = np.exp(theta)  # theta arrives in log-space
        target_lnpdf.counter += 1
        if prior_on_variance:
            m.kern.variance = theta[0]
            m.kern.lengthscale = theta[1:]
        else:
            m.kern.lengthscale = theta
        if without_prior:
            grad_dexpTheta = -m._log_likelihood_gradients()[1:-1]
            grad_dtheta = grad_dexpTheta * theta  # chain rule for log-space input
            return [m.log_likelihood(), grad_dtheta]
        else:
            if prior_on_variance:
                grad_dexpTheta = m.objective_function_gradients()[:-1]
            else:
                grad_dexpTheta = m.objective_function_gradients()[1:-1]
            grad_dtheta = grad_dexpTheta * theta
            return [-m.objective_function(), grad_dtheta]
    target_lnpdf.counter = 0
    return target_lnpdf
def build_GPR_with_grad_lnpdf_absolutly_no_autograd(X, y, prior_on_variance=True):
    """Build a GP-regression hyperparameter log-posterior with gradients.

    Priors are Gamma distributions specified by expectation/variance via
    ``Gamma.from_EV``.  The returned callable maps log-space hyperparameters
    to ``[log density, gradient]``; its ``counter`` attribute counts calls.
    """
    import GPy
    dim = X.shape[1]
    rbf = GPy.kern.RBF(dim, lengthscale=np.ones(dim), ARD=True)
    rbf.lengthscale.set_prior(GPy.priors.Gamma.from_EV(1., 0.1))
    if prior_on_variance:
        rbf.variance.set_prior(GPy.priors.Gamma.from_EV(1., 1))
    model = GPy.models.GPRegression(X, y, kernel=rbf)

    def target_lnpdf(theta, without_prior=False):
        exp_theta = np.exp(theta)
        target_lnpdf.counter += 1
        # Install the exp-space hyperparameters on the model.
        if prior_on_variance:
            model.kern.variance = exp_theta[0]
            model.kern.lengthscale = exp_theta[1:]
        else:
            model.kern.lengthscale = exp_theta
        if without_prior:
            # Likelihood-only path; chain rule through exp().
            grad_wrt_exp = -model._log_likelihood_gradients()[1:-1]
            return [model.log_likelihood(), grad_wrt_exp * exp_theta]
        # Drop the noise gradient (and the variance gradient when it carries
        # no prior) before chain-ruling through exp().
        if prior_on_variance:
            grad_wrt_exp = model.objective_function_gradients()[:-1]
        else:
            grad_wrt_exp = model.objective_function_gradients()[1:-1]
        return [-model.objective_function(), grad_wrt_exp * exp_theta]

    target_lnpdf.counter = 0
    return target_lnpdf
def build_GPR_iono_lnpdf(prior_on_variance=True):
    """Ionosphere subset (every 3rd row, first 100) -> GPR hyperparameter lnpdf."""
    data = np.loadtxt(data_path + "datasets/ionosphere.data")
    subset = data[::3]
    targets = subset[:, -1].reshape((-1, 1))[:100].copy()
    features = subset[:, :-1][:100].copy()
    return build_GPR_lnpdf(features, targets, prior_on_variance=prior_on_variance)
def build_GPR2_iono_lnpdf(prior_on_variance=True):
    """Ionosphere subset (every 3rd row, first 100) -> GPR2 hyperparameter lnpdf."""
    data = np.loadtxt(data_path + "datasets/ionosphere.data")
    subset = data[::3]
    targets = subset[:, -1].reshape((-1, 1))[:100].copy()
    features = subset[:, :-1][:100].copy()
    return build_GPR2_lnpdf(features, targets, prior_on_variance=prior_on_variance)
def build_GPR_iono_with_grad_lnpdf(remove_autograd=False):
    """Build a gradient-enabled GPR lnpdf on the ionosphere subset.

    NOTE(review): the branch looks inverted — ``remove_autograd=True``
    selects ``build_GPR_with_grad_lnpdf_autograd``, and that helper is not
    defined in this part of the file.  Confirm the flag semantics and that
    the helper exists before relying on the True path.
    """
    data = np.loadtxt(data_path + "datasets/ionosphere.data")
    # Subsample: every 3rd row, first 100 rows; last column is the target.
    y = data[::3, -1].reshape((-1,1))[:100].copy()
    X = data[::3, :-1][:100].copy()
    if remove_autograd:
        return build_GPR_with_grad_lnpdf_autograd(X,y)
    else:
        return build_GPR_with_grad_lnpdf(X, y)
def build_GPR_iono_with_grad_lnpdf_no_autograd():
    """Ionosphere subset -> gradient-enabled GPR lnpdf (no autograd anywhere)."""
    data = np.loadtxt(data_path + "datasets/ionosphere.data")
    subset = data[::3]
    targets = subset[:, -1].reshape((-1, 1))[:100].copy()
    features = subset[:, :-1][:100].copy()
    return build_GPR_with_grad_lnpdf_absolutly_no_autograd(features, targets)
def build_GPR_iono2_with_grad_lnpdf_no_autograd():
    """Ionosphere subset -> gradient-enabled GPR2 lnpdf, no variance prior."""
    data = np.loadtxt(data_path + "datasets/ionosphere.data")
    subset = data[::3]
    targets = subset[:, -1].reshape((-1, 1))[:100].copy()
    features = subset[:, :-1][:100].copy()
    return build_GPR2_with_grad_lnpdf_absolutly_no_autograd(features, targets, prior_on_variance=False)
### Frisk Experiment
def build_frisk_lnpdf(prior_variance=1):
    """Build the stop-and-frisk log-posterior (precinct type 1).

    Returns ``[target_lnpdf, prior, prior_chol, num_dimensions]``; the
    callable's ``counter`` attribute counts evaluated samples.
    """
    import experiments.lnpdfs.StopAndFrisk.frisk as frisk
    lnpdf, _, num_dimensions, _, _ = frisk.make_model_funs(precinct_type=1)
    prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
    prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)

    def target_lnpdf(theta, without_prior=False):
        theta = np.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        value = lnpdf(theta)
        # Optionally subtract the Gaussian prior to expose the bare likelihood.
        return value - prior.logpdf(theta) if without_prior else value

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol, num_dimensions]
def build_frisk_autograd(prior_variance=1):
    """Autograd-compatible variant of :func:`build_frisk_lnpdf`.

    Same contract, but built on ``autograd.numpy`` and the autograd frisk
    model so the returned callable is differentiable.
    """
    import experiments.lnpdfs.StopAndFrisk.frisk_autograd as frisk
    import autograd.numpy as anp
    lnpdf, _, num_dimensions, _, _ = frisk.make_model_funs(precinct_type=1)
    prior = normal_pdf(anp.zeros(num_dimensions), prior_variance * anp.eye(num_dimensions))
    prior_chol = anp.sqrt(prior_variance) * anp.eye(num_dimensions)

    def target_lnpdf(theta, without_prior=False):
        theta = anp.atleast_2d(theta)
        target_lnpdf.counter += len(theta)
        value = lnpdf(theta)
        # Optionally subtract the Gaussian prior to expose the bare likelihood.
        return value - prior.logpdf(theta) if without_prior else value

    target_lnpdf.counter = 0
    return [target_lnpdf, prior, prior_chol, num_dimensions]
### Goodwin Oscillator
def build_Goodwin(target_labels, sigma=0.1, steps=81, deltaS=1.0, startTimeToObserv=41, nosOfObserv=2, gamma_shape=2., gamma_rate=1., parameters=None, seed=None):
    """Build the Goodwin-oscillator conditional log-posterior.

    When ``parameters`` is None a default 3-gene configuration is used;
    otherwise the gene count is inferred as ``len(parameters) - 3``.  The
    returned callable takes log-space parameter rows, exponentiates them and
    evaluates the conditional posterior per row; ``counter`` counts samples.
    """
    from experiments.lnpdfs.goodwinoscillator.GoodwinOscillator import GoodwinOscillator as GoodwinOscillator
    if parameters is None:
        g = 3  # number of genes
        # First gene coupling 2.0, remaining couplings 1.0.
        kappa = np.array([2.0] + [1.0] * (g - 2))
        # Parameter layout: [rho, a1, a2, alpha, kappa...].
        parameters = np.concatenate(([10, 1.0, 3.0, 0.5], kappa))
    else:
        g = len(parameters) - 3
    goodwin = GoodwinOscillator(parameters=parameters, x0=np.zeros(g),
                                target_param_label=target_labels, nosOfS=steps,
                                deltaS=deltaS, gamma_shape=gamma_shape,
                                gamma_rate=gamma_rate, nosOfObserv=nosOfObserv,
                                sigma=sigma, startTimeToObserv=startTimeToObserv,
                                seed=seed)

    def target_lnpdf(thetas):
        thetas = np.atleast_2d(np.exp(thetas))
        target_lnpdf.counter += len(thetas)
        return np.array([goodwin.conditionalPosterior(t) for t in thetas])

    target_lnpdf.counter = 0
    return target_lnpdf
def build_Goodwin_grad_with_lnpdf(target_labels, sigma=0.1, steps=81, deltaS=1.0, startTimeToObserv=41, nosOfObserv=2, gamma_shape=2., gamma_rate=1., parameters=None, seed=None):
    """Goodwin-oscillator log-posterior returning values AND gradients.

    Same model setup as :func:`build_Goodwin`; the returned callable yields
    ``(lnpdfs, grads)`` with gradients chain-ruled through the exp() of the
    log-space parameters.  ``counter`` counts evaluated samples.
    """
    from experiments.lnpdfs.goodwinoscillator.GoodwinOscillator import GoodwinOscillator as GoodwinOscillator
    if parameters is None:
        g = 3  # number of genes
        # First gene coupling 2.0, remaining couplings 1.0.
        kappa = np.array([2.0] + [1.0] * (g - 2))
        # Parameter layout: [rho, a1, a2, alpha, kappa...].
        parameters = np.concatenate(([10, 1.0, 3.0, 0.5], kappa))
    else:
        g = len(parameters) - 3
    num_dimensions = len(target_labels)
    goodwin = GoodwinOscillator(parameters=parameters, x0=np.zeros(g),
                                target_param_label=target_labels, nosOfS=steps,
                                deltaS=deltaS, gamma_shape=gamma_shape,
                                gamma_rate=gamma_rate, nosOfObserv=nosOfObserv,
                                sigma=sigma, startTimeToObserv=startTimeToObserv,
                                seed=seed)

    def target_lnpdf(thetas):
        thetas = np.atleast_2d(np.exp(thetas))
        target_lnpdf.counter += len(thetas)
        lnpdfs = np.empty(len(thetas))
        grads = np.empty((len(thetas), num_dimensions))
        for row, t in enumerate(thetas):
            lnpdfs[row] = goodwin.conditionalPosterior(t)
            grads[row] = goodwin.gradient_logposterior(t) * t  # chain rule through exp()
        return lnpdfs, grads

    target_lnpdf.counter = 0
    return target_lnpdf
def build_Goodwin_grad(target_labels, sigma=0.1, steps=81, deltaS=1.0, startTimeToObserv=41, nosOfObserv=2, gamma_shape=2., gamma_rate=1., parameters=None, seed=None):
    """Gradient-only Goodwin-oscillator builder.

    Identical setup to :func:`build_Goodwin_grad_with_lnpdf`, but the density
    values are deliberately stubbed to -1 (only the gradients are computed);
    the original code skipped ``conditionalPosterior`` here.
    """
    from experiments.lnpdfs.goodwinoscillator.GoodwinOscillator import GoodwinOscillator as GoodwinOscillator
    if parameters is None:
        g = 3  # number of genes
        # First gene coupling 2.0, remaining couplings 1.0.
        kappa = np.array([2.0] + [1.0] * (g - 2))
        # Parameter layout: [rho, a1, a2, alpha, kappa...].
        parameters = np.concatenate(([10, 1.0, 3.0, 0.5], kappa))
    else:
        g = len(parameters) - 3
    num_dimensions = len(target_labels)
    goodwin = GoodwinOscillator(parameters=parameters, x0=np.zeros(g),
                                target_param_label=target_labels, nosOfS=steps,
                                deltaS=deltaS, gamma_shape=gamma_shape,
                                gamma_rate=gamma_rate, nosOfObserv=nosOfObserv,
                                sigma=sigma, startTimeToObserv=startTimeToObserv,
                                seed=seed)

    def target_lnpdf(thetas):
        thetas = np.atleast_2d(np.exp(thetas))
        target_lnpdf.counter += len(thetas)
        lnpdfs = np.empty(len(thetas))
        grads = np.empty((len(thetas), num_dimensions))
        for row, t in enumerate(thetas):
            lnpdfs[row] = -1  # dummy value; posterior evaluation skipped on purpose
            grads[row] = goodwin.gradient_logposterior(t) * t  # chain rule through exp()
        return lnpdfs, grads

    target_lnpdf.counter = 0
    return target_lnpdf
def build_1d():
    """Return a 1-D multimodal test log-density.

    The density has Gaussian-shaped bumps near +-7, a deep penalty well on the
    open interval (-5, 5) and a weak quadratic pull toward the origin.  The
    returned callable accepts a scalar or array, returns one value per entry,
    and exposes a ``counter`` attribute.

    NOTE(review): ``counter`` increments once per *call* here, not per sample
    as in the sibling builders — confirm that is intentional.
    """
    def _log_density(x):
        right_bump = 0.5 * np.exp(-0.5 * np.square(x - 7) / 20)
        left_bump = 0.48 * np.exp(-0.5 * np.square(x + 7) / 20)
        well = 30 * (-5 < x < 5)  # bool -> 0/1; x is a scalar here
        return 10 * (right_bump - well + left_bump - np.square(x) / 1000)

    def target_lnpdf(theta, without_prior=False):
        target_lnpdf.counter += 1
        flat = np.atleast_1d(theta).flatten()
        return np.array([_log_density(x) for x in flat])

    target_lnpdf.counter = 0
    return target_lnpdf
def build_ball_in_a_cup_lnpdf_parallel(poolsize, prior_var=2):
    """Build a parallel ball-in-a-cup episode-reward "lnpdf".

    Each parameter vector is converted into a DMP reference trajectory,
    clipped to joint limits and executed through the SL SWIG bridge; the
    episode reward (scaled by 5) is returned per sample.  Evaluation is
    fanned out over a pathos process pool of size ``poolsize``, each worker
    addressing its own shared-memory offset.

    NOTE(review): ``prior_var`` is currently unused at runtime — the prior
    term below is commented out.  Requires the SL robot/simulator bridge;
    not runnable standalone.
    """
    import pathos.multiprocessing as multiprocessing
    from scipy.stats import multivariate_normal
    from pathos.helpers import mp
    from SLGetInfo_SWIG_barrett import SLGetInfo_SWIG
    from SLSendTrajectory_SWIG_barrett import SLSendTrajectory_SWIG
    import time
    p = multiprocessing.Pool(poolsize)
    # DMP configuration: 7-DOF arm, 1400 control steps at dt = 0.01.
    timesteps = 1400
    numDimensions = 7
    dmpStartPos = np.array([ 0.39421758, 0.69157279, -1.11048341, 1.33390546, 0.60440922,
                             -0.08549518, -0.6456306 ])
    dmpStartVel = np.zeros(7)
    dmpGoalVel = np.zeros(7)
    tau = 0.07142857
    dmpAlphaX = 6.25
    dmpBetaX = 6.25
    dmpAmplitudeModifier = np.ones((1, numDimensions))
    dt = 0.01
    # basis = np.load('biac_basis2.npy')
    basis = np.load('basis_1400_5.npy')
    def referenceTrajectory(theta):
        # Integrate the DMP forward from the fixed start pose; the goal is
        # pinned to the start pose (goal parameters currently disabled).
        # dmpGoalPos = theta[:numDimensions]
        dmpGoalPos = dmpStartPos
        dmpWeights = theta * 100
        referencePos = np.zeros((timesteps, numDimensions))
        referenceVel = np.zeros((timesteps, numDimensions))
        referencePos[0, :] = dmpStartPos
        referenceVel[0, :] = dmpStartVel
        forcingFunction = basis.dot(dmpWeights.reshape((numDimensions, -1)).transpose())
        goalVel = dmpGoalVel * tau / (dt * timesteps)
        for i in range(0, timesteps - 1):
            movingGoal = dmpGoalPos - goalVel * dt * (timesteps - i)
            # Euler integration of the transformation system.
            acc = dmpAlphaX * (dmpBetaX * (movingGoal - referencePos[i, :]) * tau ** 2 + (goalVel - referenceVel[i,:]) * tau) + \
                  dmpAmplitudeModifier * forcingFunction[i, :] * tau ** 2
            referenceVel[i + 1, :] = referenceVel[i,:] + dt * acc
            referencePos[i + 1, :] = referencePos[i,:] + dt * referenceVel[i + 1, :]
        # plt.figure(123)
        # plt.clf()
        # plt.plot(referencePos)
        # plt.pause(0.01)
        return referencePos
    # Per-joint [min, max] limits for the 7 joints.
    joint_limits = np.array([[-2.6,2.6],[-2.1,2.],[-2.8,2.8],[-0.9, 3.2], [-4.8, 1.3], [-1.6,1.6],[-2.2,2.2]])
    def clip_to_jointlimits(traj):
        # Clamp every time step of the trajectory into the joint limits.
        return np.clip(traj, joint_limits[:, 0], joint_limits[:, 1])
    # return not(np.any(np.min(traj,axis=0) < joint_limits[:,0]) or np.any(np.max(traj,axis=0) > joint_limits[:,1]))
    # One shared-memory offset per pool worker (7 DOFs apart).
    shm_offsets = 7 * np.arange(poolsize)
    def target_lnpdf_single(dmp_params):
        # Runs inside a pool worker: derive the worker's shm offset from the
        # process name suffix assigned by multiprocessing.
        params_with_goal = dmp_params
        # threadID = int(mp.context.threading.currentThread().name.split('-')[-1]) % poolsize
        threadID = int(mp.context.process.current_process().name.split('-')[-1]) % poolsize
        shm_offset = int(shm_offsets[threadID])
        initState = np.array([0., 0.])
        [N_DOFS, N_DOFS_SHM, _] = SLGetInfo_SWIG()
        maxCommands = 2
        numCommand = 1
        waitTime = 0.0
        # Hold the start pose for 1000 steps before executing the rollout.
        zero_trajectory = np.repeat(np.reshape(dmpStartPos, (1,-1)),1000, axis=0)
        # np.zeros((1000, N_DOFS_SHM))
        timeOut = 20
        stateBuffer = initState
        refTraj = clip_to_jointlimits(referenceTrajectory(params_with_goal))
        # First command: move to / hold the start pose.
        [trajState, flag] = SLSendTrajectory_SWIG(numCommand,
                                                  maxCommands,
                                                  waitTime,
                                                  zero_trajectory,
                                                  stateBuffer,
                                                  timeOut,
                                                  shm_offset)
        # traj = SLGetEpisodeSWIG(2,shm_offset)[0]
        # if not np.all(np.isfinite(traj)):
        #     print("something went wrong")
        #time.sleep(0.)
        # Second command: execute the DMP reference trajectory.
        [trajState, flag] = SLSendTrajectory_SWIG(2,
                                                  maxCommands,
                                                  waitTime,
                                                  refTraj,
                                                  np.zeros((0)),
                                                  timeOut,
                                                  shm_offset)
        # time.sleep(1)
        # traj = SLGetEpisodeSWIG(2,shm_offset)[0]
        # if not np.all(np.isfinite(traj)):
        #     print("something went wrong")
        # print(trajState[0])
        # import hashlib
        # filename = "/tmp/biacdebug/"+ hashlib.md5(str(params_with_goal).encode('utf-8')).hexdigest()
        # np.save("/tmp/biacdebug/"+ hashlib.md5(str(params_with_goal).encode('utf-8')).hexdigest(),trajState)
        if flag == 1:
            reward = trajState[0]
        else:
            # Rollout failed — treat as impossible sample.
            reward= -np.Inf
        return 5 * reward
    # prior = multivariate_normal(np.zeros(numDimensions*9), prior_var * np.eye(numDimensions*9))
    def target_lnpdf(theta):
        # Evaluate a batch of parameter vectors in parallel on the pool.
        input = np.atleast_2d(theta)
        # manager = multiprocessing.Manager()
        # idQueue = manager.Queue()
        # for i in ids:
        #     idQueue.put(i)
        rewards = p.map(target_lnpdf_single, input)
        # Sanity check: rewards > -1 are unexpected for this task.
        if np.any(np.asarray(rewards) > -1):
            print('error')
            print(rewards)
        # print(prior.logpdf(input))
        return rewards #+ prior.logpdf(input)
    return target_lnpdf
def build_ball_in_a_cup_lnpdf():
    """Build a single-process ball-in-a-cup episode-reward "lnpdf".

    Sequential variant of the parallel builder: each parameter vector is
    scaled, turned into a DMP reference trajectory, checked against (tighter)
    joint limits, executed via the SL SWIG bridge, and rewarded.  The
    returned callable exposes a ``counter`` attribute counting samples.
    Requires the SL robot/simulator bridge; not runnable standalone.
    """
    from SLGetInfo_SWIG_barrett import SLGetInfo_SWIG
    from SLSendTrajectory_SWIG_barrett import SLSendTrajectory_SWIG
    import time
    # DMP configuration: 7-DOF arm, 1000 control steps at dt = 0.01.
    timesteps = 1000
    numDimensions = 7
    dmpStartPos = np.array([ 0.39421758, 0.69157279, -1.11048341, 1.33390546, 0.60440922,
                             -0.08549518, -0.6456306 ])
    dmpStartVel = np.zeros(7)
    dmpGoalVel = np.zeros(7)
    tau = 0.1
    dmpAlphaX = 6.25
    dmpBetaX = 6.25
    dmpAmplitudeModifier = np.ones((1, numDimensions))
    dt = 0.01
    # basis = np.load('biac_basis2.npy')
    basis = np.load('basisFctn4.npy')
    def referenceTrajectory(theta):
        # Integrate the DMP forward from the fixed start pose; the goal is
        # pinned to the start pose (goal parameters currently disabled).
        # dmpGoalPos = theta[:numDimensions]
        dmpGoalPos = dmpStartPos
        dmpWeights = theta * 100
        referencePos = np.zeros((timesteps, numDimensions))
        referenceVel = np.zeros((timesteps, numDimensions))
        referencePos[0, :] = dmpStartPos
        referenceVel[0, :] = dmpStartVel
        forcingFunction = basis.dot(dmpWeights.reshape((numDimensions, -1)).transpose())
        goalVel = dmpGoalVel * tau / (dt * timesteps)
        for i in range(0, timesteps - 1):
            movingGoal = dmpGoalPos - goalVel * dt * (timesteps - i)
            # Euler integration of the transformation system.
            acc = dmpAlphaX * (dmpBetaX * (movingGoal - referencePos[i, :]) * tau ** 2 + (goalVel - referenceVel[i,:]) * tau) + \
                  dmpAmplitudeModifier * forcingFunction[i, :] * tau ** 2
            referenceVel[i + 1, :] = referenceVel[i,:] + dt * acc
            referencePos[i + 1, :] = referencePos[i,:] + dt * referenceVel[i + 1, :]
        return referencePos
    # joint_limits = np.array([[-2.6,2.6],[-2.1,2.],[-2.8,2.8],[-0.9, 3.2], [-4.8, 1.3], [-1.6,1.6],[-2.2,2.2]])
    # Slightly tightened per-joint [min, max] limits for the 7 joints.
    joint_limits = np.array([[-2.4,2.4],[-1.9,1.8],[-2.6,2.6],[-0.7, 3.0], [-4.6, 1.1], [-1.4,1.4],[-2.0,2.0]])
    def clip_to_jointlimits(traj):
        # Clamp every time step of the trajectory into the joint limits.
        return np.clip(traj, joint_limits[:, 0], joint_limits[:, 1])
    def target_lnpdf_single(dmp_params):
        params_with_goal = dmp_params
        initState = np.array([0., 0.])
        [N_DOFS, N_DOFS_SHM, _] = SLGetInfo_SWIG()
        maxCommands = 2
        numCommand = 1
        waitTime = 0.0
        # Hold the start pose for 1000 steps before executing the rollout.
        zero_trajectory = np.repeat(np.reshape(dmpStartPos, (1,-1)),1000, axis=0)
        timeOut = 20
        stateBuffer = initState
        refTraj = referenceTrajectory(params_with_goal)
        # Reject trajectories that overshoot the limits by more than 5x the
        # limit magnitude before even touching the robot.
        # NOTE(review): ``[np.newaxis:7]`` is the slice ``[:7]`` (np.newaxis
        # is None) — presumably ``[np.newaxis, :7]`` was intended; the
        # broadcast result is the same here.  Confirm.
        if (np.any(((refTraj-joint_limits.T[1][np.newaxis:7])/joint_limits.T[1][np.newaxis:7])>5)
            or np.any(((refTraj-joint_limits.T[0][np.newaxis:7])/joint_limits.T[0][np.newaxis:7])>5)):
            return -2000
        refTraj = clip_to_jointlimits(refTraj)
        # First command: move to / hold the start pose.
        [trajState, flag] = SLSendTrajectory_SWIG(numCommand,
                                                  maxCommands,
                                                  waitTime,
                                                  zero_trajectory,
                                                  stateBuffer,
                                                  timeOut,
                                                  0)
        # Second command: execute the DMP reference trajectory.
        [trajState, flag] = SLSendTrajectory_SWIG(2,
                                                  maxCommands,
                                                  waitTime,
                                                  refTraj,
                                                  np.zeros((0)),
                                                  timeOut,
                                                  0)
        # Final command (-1): reset/stop episode.
        SLSendTrajectory_SWIG(-1, 1, 0.0, 0 * zero_trajectory,
                              np.array([]),
                              10,
                              0)
        # time.sleep(1)
        # traj = SLGetEpisodeSWIG(2,shm_offset)[0]
        # if not np.all(np.isfinite(traj)):
        #     print("something went wrong")
        # print(trajState[0])
        # import hashlib
        # filename = "/tmp/biacdebug/"+ hashlib.md5(str(params_with_goal).encode('utf-8')).hexdigest()
        # np.save("/tmp/biacdebug/"+ hashlib.md5(str(params_with_goal).encode('utf-8')).hexdigest(),trajState)
        if flag == 1:
            reward = trajState[0]
            # Implausible magnitudes are treated as failed rollouts.
            if (reward > 10000 or reward < -10000):
                print('strange reward')
                reward = -10000
        else:
            reward= -10000
        return reward
    def target_lnpdf(theta):
        input = np.atleast_2d(theta)
        target_lnpdf.counter += len(input)
        # Per-basis scaling: weights for later basis functions are amplified
        # linearly from 1 to 10 — presumably to equalize their influence.
        # TODO confirm intent.
        input = np.repeat(np.linspace(1.,10.,int(input.shape[1]/7)).reshape((1,-1)),[7],axis=0).flatten() * input
        rewards = np.empty((len(input)))
        for i in range(len(rewards)):
            rewards[i] = target_lnpdf_single(input[i])
        # if np.any(np.asarray(rewards) > -1):
        #     print('error')
        print(rewards)
        return 0.5 * rewards
    target_lnpdf.counter = 0
    return target_lnpdf
| 42.836608
| 247
| 0.611786
| 5,224
| 41,423
| 4.663476
| 0.073315
| 0.060832
| 0.033987
| 0.018718
| 0.863024
| 0.840818
| 0.826164
| 0.794311
| 0.762827
| 0.749938
| 0
| 0.030556
| 0.262077
| 41,423
| 966
| 248
| 42.880952
| 0.766447
| 0.066823
| 0
| 0.771863
| 0
| 0
| 0.006175
| 0.004559
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079848
| false
| 0
| 0.050697
| 0.003802
| 0.231939
| 0.007605
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
61698a109a3d4c71ae2d700220f0e739b0f66489
| 6,353
|
py
|
Python
|
tests/test_import.py
|
cazier/jeopardy
|
2843985e98b9d871e0872c23f4ae2f8f0fa5f42a
|
[
"MIT"
] | null | null | null |
tests/test_import.py
|
cazier/jeopardy
|
2843985e98b9d871e0872c23f4ae2f8f0fa5f42a
|
[
"MIT"
] | 20
|
2021-01-15T20:47:59.000Z
|
2022-01-23T17:53:58.000Z
|
tests/test_import.py
|
cazier/jeopardy
|
2843985e98b9d871e0872c23f4ae2f8f0fa5f42a
|
[
"MIT"
] | null | null | null |
import json
import pathlib
import pytest
from jeopardy import api
def test_add_one_long(emptyclient):
    """A complete clue using long field names is accepted."""
    clue = dict(
        date="2020-01-01",
        show=1,
        round=0,
        complete=True,
        answer="answer",
        question="question",
        external=True,
        value=1,
        category="test",
    )
    assert api.database.add(clue_data=clue, uses_shortnames=False) is not None
def test_add_one_long_missing(emptyclient):
    """Dropping the 'complete' key from a long-name clue raises MissingDataError."""
    clue = dict(
        date="2020-01-01",
        show=1,
        round=0,
        answer="answer",
        question="question",
        external=True,
        value=1,
        category="test",
    )
    with pytest.raises(api.database.MissingDataError, match=".*following keys.*"):
        api.database.add(clue_data=clue, uses_shortnames=False)
def test_add_one_short(emptyclient):
    """A complete clue using short field names is accepted."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    assert api.database.add(clue_data=clue, uses_shortnames=True) is not None
def test_add_one_short_missing(emptyclient):
    """Dropping the 'f' (complete) key from a short-name clue raises MissingDataError."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r=0,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    with pytest.raises(api.database.MissingDataError, match=".*following keys.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_multiple(emptyclient):
    """Several valid clues (distinct show numbers) can all be added."""
    base = dict(
        d="2020-01-01",
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    clues = [dict(base, s=show) for show in (3, 4)]
    results = [api.database.add(clue_data=clue, uses_shortnames=True) for clue in clues]
    assert all((response is not None for response in results))
def test_add_one_empty(emptyclient):
    """Blanking any single field of an otherwise-valid clue raises MissingDataError."""
    base = dict(
        d="2020-01-01",
        s=2,
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    for blank_key in base:
        data = dict(base, **{blank_key: ""})
        with pytest.raises(api.database.MissingDataError, match=".*has an empty.*"):
            api.database.add(clue_data=data, uses_shortnames=True)
def test_add_one_repeat(emptyclient):
    """Re-adding an already-stored set raises SetAlreadyExistsError.

    Relies on an earlier test in this module having inserted show 2.
    """
    clue = dict(
        d="2020-01-01",
        s=2,
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    with pytest.raises(api.database.SetAlreadyExistsError):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_date(emptyclient):
    """A malformed date string raises BadDataError."""
    clue = dict(
        d="20210-01-01",
        s=2,
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*date is in the isoformat.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_show(emptyclient):
    """A non-integer show number raises BadDataError."""
    clue = dict(
        d="2020-01-01",
        s="alex",
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*show number is an integer.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_round_not_integer(emptyclient):
    """A non-integer round raises BadDataError."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r="alex",
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*round number is one of the.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_round_not_valid(emptyclient):
    """An out-of-range round number raises BadDataError."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r=3,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*round number is one of the.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_complete(emptyclient):
    """A non-boolean complete flag raises BadDataError."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r=0,
        f="alex",
        a="answer",
        q="question",
        e=True,
        v=1,
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*complete tag is supplied.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_value_not_integer(emptyclient):
    """A non-numeric value raises BadDataError."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v="alex",
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*value is a positive number.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_value_not_positive(emptyclient):
    """A negative value raises BadDataError."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r=0,
        f=True,
        a="answer",
        q="question",
        e=True,
        v=-1,
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*value is a positive number.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
def test_add_bad_external(emptyclient):
    """A non-boolean external flag raises BadDataError."""
    clue = dict(
        d="2020-01-01",
        s=2,
        r=0,
        f=True,
        a="answer",
        q="question",
        e="alex",
        v=1,
        c="test",
    )
    with pytest.raises(api.database.BadDataError, match=".*external tag is supplied.*"):
        api.database.add(clue_data=clue, uses_shortnames=True)
| 23.356618
| 90
| 0.487801
| 750
| 6,353
| 4.02
| 0.117333
| 0.098507
| 0.049751
| 0.089552
| 0.860365
| 0.831841
| 0.822554
| 0.798342
| 0.798342
| 0.746932
| 0
| 0.041193
| 0.335117
| 6,353
| 271
| 91
| 23.442804
| 0.672585
| 0
| 0
| 0.718062
| 0
| 0
| 0.15473
| 0
| 0
| 0
| 0
| 0
| 0.013216
| 1
| 0.066079
| false
| 0
| 0.017621
| 0
| 0.0837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
616d57a2e3829968aabb76f87b03b846880abe3f
| 168
|
py
|
Python
|
drifter_ml/classification_tests/__init__.py
|
mc-robinson/drifter_ml
|
fe9d0d71b57b9bfba7ad67968bb583dab7dc6212
|
[
"MIT"
] | 88
|
2019-03-15T01:25:43.000Z
|
2022-01-13T05:08:41.000Z
|
drifter_ml/classification_tests/__init__.py
|
mc-robinson/drifter_ml
|
fe9d0d71b57b9bfba7ad67968bb583dab7dc6212
|
[
"MIT"
] | 32
|
2019-03-20T16:16:56.000Z
|
2022-01-23T05:06:27.000Z
|
drifter_ml/classification_tests/__init__.py
|
mc-robinson/drifter_ml
|
fe9d0d71b57b9bfba7ad67968bb583dab7dc6212
|
[
"MIT"
] | 8
|
2019-04-02T21:54:42.000Z
|
2020-11-05T11:47:15.000Z
|
from .classification_tests import ClassificationTests
from .classification_tests import ClassifierComparison
__all__ = ["ClassificationTests", "ClassifierComparison"]
| 33.6
| 57
| 0.863095
| 13
| 168
| 10.692308
| 0.538462
| 0.258993
| 0.330935
| 0.417266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077381
| 168
| 4
| 58
| 42
| 0.896774
| 0
| 0
| 0
| 0
| 0
| 0.232143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
6192d8ec841e34aaeb5fb877cfcd3f23cc0eb6aa
| 1,565
|
py
|
Python
|
139-Word_Break.py
|
QuenLo/leecode
|
ce861103949510dc54fd5cb336bd992c40748de2
|
[
"MIT"
] | 6
|
2018-06-13T06:48:42.000Z
|
2020-11-25T10:48:13.000Z
|
139-Word_Break.py
|
QuenLo/leecode
|
ce861103949510dc54fd5cb336bd992c40748de2
|
[
"MIT"
] | null | null | null |
139-Word_Break.py
|
QuenLo/leecode
|
ce861103949510dc54fd5cb336bd992c40748de2
|
[
"MIT"
] | null | null | null |
## Recursion with memoization
# Time: O(n^3)
class Solution:
    """LeetCode 139 Word Break — top-down recursion memoized with lru_cache."""

    def wordBreak(self, s: str, wordDict: "List[str]") -> bool:
        """Return True if ``s`` can be segmented into words from ``wordDict``.

        BUGFIX: ``lru_cache`` was used without being imported, and ``List``
        was referenced without ``from typing import List`` (NameError at
        class-definition time).  The annotation is now a string literal and
        the cache is imported locally.
        """
        from functools import lru_cache

        @lru_cache(maxsize=None)
        def recur(s, word_dict, start):
            if start == len(s):
                return True
            for end in range(start + 1, len(s) + 1):
                # A valid prefix word plus a breakable suffix => breakable.
                if s[start:end] in word_dict and recur(s, word_dict, end):
                    return True
            return False

        # frozenset: hashable, so it can participate in the cache key.
        return recur(s, frozenset(wordDict), 0)
class SolutionII:
    """LeetCode 139 Word Break — top-down DP with an explicit memo array."""

    def wordBreak(self, s: str, wordDict: "List[str]") -> bool:
        """Return True if ``s`` can be segmented into words from ``wordDict``.

        BUGFIX: ``List`` was referenced without ``from typing import List``
        (NameError at class-definition time); the annotation is now a string
        literal so it is never evaluated.
        """
        memo = [-1] * len(s)  # -1 = unknown; True/False once resolved

        def recur(s, word_dict, start):
            if start == len(s):
                return True
            if memo[start] != -1:
                return memo[start]
            for end in range(start + 1, len(s) + 1):
                if s[start:end] in word_dict and recur(s, word_dict, end):
                    memo[start] = True
                    return True
            memo[start] = False
            return False

        return recur(s, set(wordDict), 0)
## Brute Force
# Time: O(2^n)
class Solution:
    """LeetCode 139 Word Break — plain exponential recursion (reference only)."""

    def wordBreak(self, s: str, wordDict: "List[str]") -> bool:
        """Return True if ``s`` can be segmented into words from ``wordDict``.

        BUGFIX: ``List`` was referenced without ``from typing import List``
        (NameError at class-definition time); the annotation is now a string
        literal so it is never evaluated.
        """
        def recur(s, word_dict, start):
            if start == len(s):
                return True
            for end in range(start + 1, len(s) + 1):
                if s[start:end] in word_dict and recur(s, word_dict, end):
                    return True
            return False

        return recur(s, set(wordDict), 0)
| 31.3
| 76
| 0.480511
| 197
| 1,565
| 3.766497
| 0.213198
| 0.072776
| 0.080863
| 0.113208
| 0.781671
| 0.781671
| 0.781671
| 0.781671
| 0.718329
| 0.665768
| 0
| 0.01413
| 0.412141
| 1,565
| 49
| 77
| 31.938776
| 0.792391
| 0.040895
| 0
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.611111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
f605cae6d51330b79450dff4a3b1f36b8f2137fd
| 81,264
|
py
|
Python
|
tctf/crypto/babyring/release/task.py
|
deut-erium/WriteUps
|
36b4193f5fab9f95527a48626ecba631d5a03796
|
[
"MIT"
] | 11
|
2020-06-06T05:28:27.000Z
|
2022-01-09T00:42:49.000Z
|
2020/tctf/crypto/babyring/release/task.py
|
CSEA-IITB/WriteUps
|
46e7f36b0c4ef182cbaf375fd10fda954b6667a0
|
[
"MIT"
] | 1
|
2020-09-06T18:19:55.000Z
|
2020-09-06T18:19:55.000Z
|
tctf/crypto/babyring/release/task.py
|
deut-erium/WriteUps
|
36b4193f5fab9f95527a48626ecba631d5a03796
|
[
"MIT"
] | 6
|
2020-06-06T05:36:43.000Z
|
2021-08-11T10:17:18.000Z
|
#!/usr/bin/python2
import os,random,sys,string
from hashlib import sha256
from struct import pack, unpack
import SocketServer
from Crypto.Cipher import ARC4
from flag import flag
K = 64
def gen():
    """Generate the RSA public exponent and K fresh 4096-bit strong moduli.

    Each modulus is a product of two independent 2048-bit strong primes.
    Slow: prime generation dominates.
    """
    from Crypto.Util.number import getStrongPrime
    e = 65537
    Ns = [getStrongPrime(2048) * getStrongPrime(2048) for _ in range(K)]
    return e, Ns
e, Ns = (65537, [770490466907683602110378071421369221862921320503590614209797163508400496321974699751838925910212582067640033338208253848185705000931772110272228438746303216456214840281999977271664884049737412489683336247146905970790657719110482280684265791321005072325768791089586227842965307689398081448494378605026677164880175617681310072923299030259218276844723163201010992461606002634098913814203454235138549874354352794501374973634628738845273607238282863229873145869264050655662663079247102827774495142451851523340665219493474500446630311650947391863060353578845519230453003253319043010396105487377221402499331935031452578682212804231558194990663195372223764943882094463713596011601657519999628272423455022371050823050808824658199918369321615075229948272656495349149432117067836490889547265127126795646868049067849776438415984256095396376586400344784872140819756271139898769893788303870788292068857936681358238629576670680615413707360812345103022413565834670031956214258628401267394717093065558297210543991364219326902442594221010678553053229221766996388105122917592614388933032203219308304446696277824948231236918841049974834775435864666332248390351274331259641759161745003665310088775244490548767780498840500389907455677369729620283990291401L, 
885692755284206502623656640956321773698340057958522994776457088731177145967223428506812999560658424426883854168572616677171449076739279082401634389686610855276271785643831068107170911665180062775179038673702798400461025486936709942257333714793117558020594684836888134278343465452037720334680777471474803877353672031629236505572369881207911142259578344578542867799668740472787713174050773804954627545280186831607985142819341596151799378009933419637054476605990989277546082605763851778110856368688894689661019565784991491657429411993720922315683476329440320650384511831154275674572193830120757263904863415560890165744429428227736525959253833247914165820428411925760527040018988727178866925845867433365520457677542152285153115114056520252017789092342900065830058449753702124487943385928377186289755886495235335132031291695126519206454413127358971964710479671514021468814714206524162403876281366804077177021150717180769633987615681793882937492501382409247814798005782245356916203200164020521055729162096224783806346543856287585696197192534233632618952400112843644977356345340168612627239375673102496580297842771014309796142844999368360923802948703973187319801070509170903620362112278983245653067338392885313393063497333407891752230845067L, 
852510728627885514805129432448163936779455233359086090604312652068629018425646417864053240292564868645234609828474059125927678326756692996193572521176066669844551257373056766350980392600587776680296439317162554050157487448502725678967343774233531797464029879519668956912470673909584080157249109731691038677337219948825582995263809631292406318333736673933342694806364339993778964768895539956491284745958670525072586055766163093796745320022551876648942550686343092276782451429378927071542570490425607161843752913262615394023722564033012544009402259413994858767178047305251465303229129975086038060987895455232636102827136079929671596033532219102709039198169435722488439503206979086571639841495988427056862301017026101644939848257356305389476553384280273684072026665938844176242786370072951360568331339330010070786975462033409224745429859501334187781645302568728500974494446636451705432788871519140291342928301307882895681003009188893450892871138889050900575953176699685838295039997920426217150725512084954211906081443890155366870920146976136818951705263077533492017609990289213994450599090252769565091985594978663906563679579023209003502653137925740948486050084190054397989017431566658676945478870957684955932861000783988758476871280887L, 
759699527819513059664864927699001755706659800861644147927523334322903799150426099406666688387582727287516113685315626733430575862164452994786853328817814267470416282112764016243236981123416528726304286290907273991799690978404983353383119525914386080925303410290000146230560316032937085197123961190404442625226458102608504317365103950915908640115988714023226526257605273499127234995705422677082113854945017998401039436881831519820999352884509497429048645314338361570974148460720269103744868140221574885395761727200907278859389658298383247417244963706633509725423674127663629034604701586362719601860174562994476315198301521023483989136540402350867567009899435356588200630983744688964637005354558490723339435160405103857842712294882540811713486781092293025328033176200451811419971289585921675846829009876168031917318201290577581231186138466773821928472492338845065719808408462195259676407103486336063397539598459843226973031283831009224751314314312198266221809441659587277508407899097952559004100262755263043652162272389587439063306189846276235657268876528907246426696239727749626669211407859464164620179199033905288688949222239565884987417397122407643877054156637062344391192797574207749734076836923696363578975835017493880243158589933L, 
758238148835364355270583900678785603168153523890190488992037660976078930826249443116191417583260762066216150212791732455214146783888356256457465675566825590164672686105568238534467847903406146371509090388467356506656748250546104016583844158902105080346541394019774225345479131912535834485550948051429809883747198889156209708401599111708989494332124184061683034428797609348476196452988185886797314779893690010113944635579262267705880485671443287031971696514725981681994067937561861059408724374305142619247386340852733083694341963450948068331262446515501246041244561550955642705046184923574170991972633928657161367812440821543776040098172287803945448583439742834180827170558724498666685630684598868114064825287004555028324604625278035298946175990255835987060927245252589776296509430829703175797903316807565305308785620196656187521342401946657954267020286466794064751726824434303252542247613448512051807899317083922656334566794428039303164605656166256827956614351001308646267203913112938035338074904364716153947683736110511293036657571515934733793408855887700292578151102347919082003531170063475011264737860653849600790774952657388252742978025744229966581024635576948772396499652752044399098561934245839038660418846369577093264402028707L, 
781395013325697887333216521681560331328819797762829844109647323483221839014959034360713669610858901722206765455259117207667613965471215230047292514920158057976818734609237622104838313026358128242676600275226102480411946020560651201524140878013608366355013047200395741212010042669478778047237861338916916670781924577008857001626985919399536562746542124555782879357110166810194239767672956217242147624304152848143786351175175939137078911511702197722380707463529808410180462210870377362736626697849591315871938159284117681884145651502825026732068413240322951643308228344819919562605857825161628624959194278082710776999071929700597157672205916710020858892468387852332916018768451407839868402306639207341975230876911798411575084243669102750073311303619857878923946912712124829047255096234444474553211184611503778800002760019662176665862622654901757629915580777378489561829533537864141597696375978578378494622401590109341253101258098820655212297152844550070331196989259266707917720381627869908336170229556348492418070833632808239891119479202207008409271601681548267098320777420240144506760266391891040204979979903644711422559349665327347348516761746396770104479993387906144114986109453029209081625600231211170349339899017592074153684919031L, 
951999459821530185693099236643980659176339949279841534564349622350964477502015819059181413327121030376835878431013732518217525653087151326465657963659816867905715348650693613909910905733104899061987933143980891459608357093275398176277920820926872227472813833035964395555641237228182532282411320780138288685186939111681336257040749950668160356237184541060248338104637077341763815540768449875867330392930299803477067582797547424361066807562766179307721797894322715818820316779962558599469176899005291796993355199829280156662713403644683496633816673904469909705587632458345813965583563677738712256824470617672508884234845317733602980957239038330336635689092581076551044743870223843839308145612758089553559599485501633485517619146215899704088713395698798208912755230121394233091768338245406946576754690820520852909271341786633634609975679391345735042400376356183339361878540783591807240663474728167623094213364032350402991100861468745348309813545992525134006835058736181227567014714936486739772834098068660378311735624484277594438924198496801200537426219007603163698337253636904246623556005396010912159226202736093184056494268034195732427988926370029574283264048482151544813681827447600065017901912292283237909043710433305639717328629451L, 
711711771348097213075627374048456290308750589122997550158456585177191886572212087987230759823036879061134187341587656249450699801192903277316721946898115140614301524652992447006322181332456956857622825399558408310245234922199345132480408939243147253058392000161129904542095080553449399917798875266459226991918556728174764032204370490225450097908552856571478935790975538704480114450658552661006857286129790951429180362462754760130282504254738054668412827645587111098833166611492652962819043283531835897208086110176357161158182822482005814808673199480999624543523540777966479951733822905058813130629074936524824635587444403512878789641385611535820795706467532253354292894749914508497861504671564874381124032300695318259597819976025526203013293987209873546624367823522366940071837150034097856140139907805492491911925426403902331200428674238323217658159690156178600452096154934025896659670983859451203584067080309174004144720555716275970224913192998427534470146433570139413591107723132060240001193787069421458338447881691612616268953753630181764712931763254906564014310357306214892907533150590979490927264992409975776915388673724302142418969066676042562614692313532540530579069795505221440883141808132926316976791412769648857135803769643L, 
621812039653267035470318018097859845024368047216586811606444198134943642501951707153428781379497624336692620854997464650424554153954814293455876757007539578869715980657570697761917047855107338594091726756171558513825763374178880472267295724810019173228767827609190023690036751611201351496881891034735963206215504414581351541626920303841810737050681394015135296583232800071074612479156586452179328569279523773663225380771988566947147706757205803305883505163935777914721145836349346530067708513075800131242604991660376498715928175146309605951506210267685214446288048884213296330547303646964617555885985804885745458192624646542147076808031539798641537665150495671678181023348381278843238514623020399240844964693058257641554523017703437968279111401196661744156681384016993540290508878337826973697788591055013734017366370120547666933130683042363558213192630508270365942028247421214690466044924090940379428732632879446939831083198136354010611810407616499593617413538891466509462185849778818660144765537209744166685113002866808027787488037934551780164377858647527413980070141569071128317628431319025659077986138367806320011316802398602755092149450019001294412020615171782612223981880983615577168772581684054180857079043118514713110789821807L, 
831074940986864770606182178399322836847228121652761832757138347607854302319554982675232726636384304498778785479514740680412744583419522149543399966567920143859795060163521216006298931840533392917849058567581108120641040805075968778819777634170892479988903551831712612041813474606204742475269294851860427019391978657974178124202705122601796493351215361874504822340745910239792703424728644412775832968906630969687532047173853312181323723004137008215791468677736371197923746266277700306168020264735501659495776358116580720666581181005655832542965471984656448295181013568624255966529974959077677829299050438667693104623140154429394300799058461640000476204074469977568635227322031749237725347586661817546477997933899809589760825160456998738827637495262852694267095972218250668736079127605900875617652482566326547306812873121806664527482224300931251682604309727418168902327474446711056689625128404980431154334013917047917994547196459659036834791094892093399571092662100589007157953067061080713982614969035469393997811643184217315704571158082524587178364723311210073535967339199365621004671436919791550470526801438968197576724496161224959717223879956665088129308877154943344342560345411562227931030286564678288638568309940930739295347872869L, 
626631344928154033206918544928711710251752239612457243581147771111959471662911550605714958921907148398882577582746623523833217206516935222841272937097799977636773460041209031714586688187078675455188214235981239143406180198015480856505850356295622999930785209415210893475652939363885963292811841868415163930242572899987751091869221625852819981078911319939769779143935034333993664451674900468538301179308570732154591858416602499301666796661649833763357011015465631364316783427716091519335780131664085702402530818512145899062280170248134421721495031677942235089742660681890469958011551988910943293535555630200865459276487382995006693672533074161720777473868400768282002000662236356451222342937299798186083677555947236392970254470135598927875872643075479913158132775119991210917771795127003642541933604182135954723271585436498508589348541655964694588235211956334337387699679049486855089162838082565071425474437845633467561188898507052705737071632736804734969912188270806940670154821210163321184320553683762226558105335098844187219631072911117969658913395799367647447267564365174070075581575363314688554509840112102727183785567598288721790312422406191594444333152403255764668849251402512839936177366590701850307755816887958264482110921887L, 
932465817435101237604110585206897348383166900724577367893809053182108673364583460018934950540963295189482526270295039656084385948148545778859048053079618142247054794717876674757496624304265584941163551956185874949858472524176299376997475683763145824439717594073999955214018421300845113940437341061254854360147575640304856282187779552635568165095475579308530838075649181621163443239840431855343725692944761559181949910032050682497982852219415258330550009901934795755279297161220607057586725097878198712046640346539662765164464852977077844784534295176650319110700912420546946523779377790530266331403608910490218552866538651534256082217317347703819506948172630769148645085065570306154521086796306824497454581459655187385118596095811303628805441898154608414671945145870316354098878327036211708493997270461077862163197528256503255557317072907107737945222264606040723592486085140447450437155137530520588798221545439420127198091971543208289779922334570855389621573120499434114298772088229927744350056271332745256859573346021944455077499328234468247587028209516970708645063892166598352404978509311875137383329808421458681151449587963025872492794726228326697737169363719987654507813508128898551542280069744253558365413943497570814962594312801L, 
757303658304940628634078841663854877874345808259585902062331246822060605126426689110558488390729857722120696841316517337425490207351434584419814984080006203521036583628592637281125699135738897150836033771072851949723769659304274358670630958995923622897147629893261617794477739051658495323845204564310249996360067481191097678540965843294412576966772672413902684773236463124820935466037474433548059984264262963361088416234379493161236604157138275944548157684423247033793082450776603331130403660719012525279205145140024833425314075844815896798316140278776933002119347376390840390289758112703639766790418053969790632336034413917368929839211040680249252150380015577623196098394692966260066548488218620571725605457271675935591377934881968908507459031102641310657939254885576070824240298541693445090582665915800190735321659424528567174906169536180919573390832379167628299057801867122139000571493111675992196623333602040674451473527545525425325990928370739672090060605298099131447123801139072169450528382240492485041162008221272353650039518079831603302581443528978161925724102104620631391635356106998230854362195544929022747826070411927212726680108494904305896018280278951707178277474305320728768960150777529757216092298465703686943399514359L, 
862112357301913197194665475656394615857371722153833718025613319373433297226935287668564365316203323502639864925148420196997120559544542738751780007681025210432474203387366106078639800838012682778690641380594158468563142978655676627989837530580380825325380925631724823269228230188025729270461287353323580121084484327816708633445523354991802243697555752860553664269964745505269556163854394551741278393633108805854821803063176414583442923256482768362687408910246887238333225729382594148032335967422637062282543960760426995550896420197205601193626673593044798772164693559409329108978688074948383907700246913729096922930561592475398500324748690462154585911566294378249541770324080593730754430451385041159734717030169364026924994520726113034466504532341107536896519354024495283217239944996195852039271939994918699409212908114371271495290543245126488837697614052922985697425321431755627126886340054178472071115554312306810819666079021226689212802122790549975977028252246181296948133800821507401454378265126533040653171276325359236198548589714584470568812705178698726545371725744542452078742748822702209715199696814652529022697439614129412837718099156374998916878732826793117583611005831253692017842215231011349497618894273495688278084137947L, 
875645952772063611917640418102183061420428919564894845099697866107991989885630344329164408156049408391676311793759562372062628477617506196394638800644137462727701868095953087752758664108864338718573153787492791064260097805970898251281999205476948391937238236404170976003272611857847654040332904384494240128483467003637624487929870042178928440793030880752232076986119907352694686660402808285847280568495871420258322640018885466679957475627932862140983059746399095433726263987468972949282203764145457741783807424455597586377903963776640477973685085417451548428065221187536544994973034356147681284838902360827191932148255042365731421256559201052789163289036972675720884833026426479099192271374954662404871006485207777331800749570658381188315596773046799814401107281695715529572597136428421048885120913286900401476687293529829692839720586538260107161947838938106357850367319708480376505885956416145589591173808760400071794095170235777334301012646578515317813621888407577105432786873230682100775964842326391685426899240721061475917333523414707555073443013004655122790728980943367569760005914396035468392642765418313885838188806321954693013831645101388563014355756373701858598013830611569030580905394303743539503721745197838920426274759133L, 
882166133722467215160540812564700283811933102178411175361006355331714350722745251220028674656531168708606719813692479797254048757015627117063286808644080941982977402137380254183985397422330550302420860190317347338136887974805767702692463205938538978472813130074713002441409214165664013704483808435986951237641133732552170709983643660916945680779820865764880736021256670721909920922425766215045976787989675511978133569807150740777448398817406140282995427688230365228898822142650968979243444590950301293170037080875306243368336060785027666167860002997939776222781028554361457190606810284300510564456392816403207924653821903620750293136877662677581124114022555745117724342267669996766072147177454873751048790771530063351424955695629309317492710054550987092350177582277956151040264627269145576491684093495020487511194346012670886173469615198644272572983872238862436410684930828812049088072740010287423292993852461532930054643885733411268921189435762661200048310797195677999873235016130366616497036257778911536470457616640288667369381946845836645258888986920548534518953090684919468031902766654097036134440989734952463717834233844943493741480523948645384557607969362153042664302734952671603396836951233566000870110744347778135001635424743L, 
709964038991915442801446476291999739701440763446094070820536853737757606653791336349103792756723712260177004349890249683314141514041320284468978174164737375241981715758555097741801948539171898516165336694472250389606196194090796425377520309324973211530521616108323714773535227055732301449513758251872779159834761311271869737199890010732673716860492296111721210862127822040759586988086879903428918946141577411545242035505116863062034581293767166475624834395674099279665949867506597926350379911709495436437540183146083713666596892846189410001529354704575087163588279441152433812879179580985006560455195835016489816415174780143880242649853596215188043434936478117069039673960933726053235084630700431408296822491246662485767419838052924308574337806977575845669281704974633892278931495093489290838124082230357583916132742794401207427794965506207302860454544086301639484682404128934754723945320930845340131740497474986819725119045683940479107826948987184540321002417512864711247300273178757890665434685799397770342478541084194129734085039316512530759153306143164609338470675437287281288715505836290347271686348458940817759429125174041761123500023534188965699791561896728245328796807602614014218797945262950396901722538393755298694047598007L, 
751534809463182245305569499744426222432584301875633740206916153906026207248974237377406884755257563801602671219795601258866473292797811639133876704685185257394807155626680758014952053396573475957157184936513659973285626238973573777296230966400780749035587959354480711247708835904337805175635975914772051187259571486536260501458096771861563201275900962288059644825814016759123200020430950286913596981114063660097993228154033591094307231703901041208637994850522757200664633084373978306255227718050791990164212342704375954447861934687215642772387474485660614319408472724201748659791717174823874792664365617290601025416070624549568531457936132456799855924298235518207998295951776962865877992477000845329830767173497456536586746419830329493472931015143752284305058697581608669918543694058414173622609954453753996406634079035299588139249111754382257378402763130047331697197756738011637299449943907811555448332723900894202028922537102928306284751940813879926673065037119943792960618230459884107191341935956932311967279241320542465335735209151109769686589318051426936831255381992611545895545125613417813782157140487361834857707394356406540966063734197923580059787619782497251619011604985583682835477674887384707124789884121994261203492029211L, 
852248088410410428831867200886431461460442173652078880256752512208512845112870181261306352300324747127307116864009712792248203387104693365400921793524695005533368414419868553673949333490422264357926967062070359304418203520498662321132458158085267118006643433557595382331761049326017287036930668115556650826315374462192742466178823572039753754658080433346410107821470243185272292673745384529361623322918722249893449136110606919004829356636804020578018588746619907888783789875419988606464507968621153969627595776066326443288889314818566236431177768161445043925180337896229019032887063183300470192597219814005061596298100381849685649164066284056749343127043540801358101069729709175027303177341731169451186967835147569187024142203304309578552258266661328661185463586602302206391541198277048250692051772831431702537778569020204117516923230390445252441117547502678996291602952510748067537788070017365296182851123201189852938658175207488663933284985470167290435495952373851092529676966826799478416155252865835972976968094895755586515432468157394580619635332804532811274727802836204436983776625829830634236310309434343569720155362624073056217911682141574464904522300470714919006948089583322805701673091859764569254824622687678647617634470617L, 
730037320620558138188395446173930478997986174987674847741497107059769655283804849137326041562010305658959789691806831671947850991303363405924839354144323015185669177780413322916908949312185664136747934843113948037320697368885788646406365604901370793964903473516424282795442186722192848664234987370634187329732035494471242990522446226858082594297362090479586957219657322803524060591156435883148346667432917755894210172335461848482599595681697321275436496235452448152559345708137240058271793156345523451832451384890980116665123006408363994784365069960381646791155966056200574299450592131821625029664419336892396951884100111888039318688589724767447960741346427615885441958564818722104681638593697413435959790429085561560140373127238700968395958266975856124903117766463908625994486391906161265647690598191815653974511652137918247362906510658903933500923131753052297890384039579391342990817492420282426857535507755682706745450262424764067253633623995010100875548144103972124097229895879453293308353901114523976011508313880703310109158410816998660849694402746064666324136702064129268550131379002051978808449733992363622459299462257563243966014894988034606605681124639182848506165944653929028112679448420136147681629081937205209075638014573L, 
669768349399214441232816560881008459323591322181980339970395596196130268748678178430096077719015229882781972023335096132200217936882199331765647492836505350652715836022713189395746693698429043107285354350325789886545804472918356840424069723196781627241390266708881354339714482195485641728803334436761248007236839124835659505712078786539491778110065492989653151704794068126142420986652265618587445570185688034510476259259399250246267630679752063849310546437338583555027413195794712266662988475364862874338026737367522184385966255791424209843955885835344526213510950695112165430297698529484093253032588891371631287981751613871986337529036442415751898928082265531997860636487492040421741814696750213212319386378371832707527282533845920315853992781965092574290837896621994515663372425616319594653101611322438786490833721814616409435429788625620653070491916753784691334744788749796910109980084863898364629812751171972932138709891331269050951855998045209205536646248885497277064101548220084052291851724542805250760521730509219047575981570995162992093941354671402794608410734833574370280782507935846809883607532302414205752571814225847367540028400721992960620286136957647008398485661649967296808058338989577371861522374056335608502853475807L, 
546760316986920179013003825030614965674997150715329713552250630889951248781184595158771110994033668925262013693832891295452011898235299000893823586974149119345727799456242985117375614775645721157532890821585387152878555206655104664684461061555496135546696573088751792612007680539642695132697225155710363791523855442646960112209746341476267230604209532439690435224767983610800532748563891201385965943920451962863515578322412239422109048597726867562126304300958647921744399868527742185959241522900698375894160418732791172017402936818324731320500933517251034891155035935593651105995415015033050960213041009980007825037695432910850487038288768007937774333687118805780907135364861112447794426057881182422026162184147864832881621627208564267345042466113011256994615360190512140658305112238624905330350750503598172925606655063828623292578768057189187905350205271604574053286464991571914947454704755320877233809943769715692780417622828844052094297281851024867959136923303466965671967660603510680746820938837986628642599120767602873915894055958432031247226125119858183937386147613781472615143535812432980833264785273930059426879494820266117916422189094510517520463426590945592611782064268839292177400984532533947329203429160584457633360880199L, 
635189543826738453789586971534670513908082752038496943501696643431460877068755550855387637213187366667426231194037510164432842375175046622747030861267326470912264864123365514642520658494021659866283823974050001994355251634612400721049639437838407072013395470883707872777252948122434604484113809706359296685252217702843422409114703956284615186856393356754311478053072087482502317355847960626013868084306720254791942347556091620421873865727895440431627950168359995502026694504436961042219722615371500912912189593229566147558677176832963052509493220767145488474713425197177993244583445613839851138060333670992831326424082017776294259736033678192655198239419858237784798689025303096130763464840401526242760697570642124339239769801653701431437065081289056863924487337338383604809960725048861262210607463245886405805330085372958587857164287986311198676323078911030230688678469831614447585498657714784503962894796689484724247962248938788404298859995534381421947763468066084820799410877696962388383099974619390065056183229213002860429335491009684933457023019115991369476055080043150920390163334310028928673994969309847478133754516568345227899778635931297906609309396257043912086734287388556455500521808006726242075972687557915646184582566189L, 
827342455538113877634688859220957669538142386173981312623426324637175706536385286201888781058311963575258845762694424695327040421591789399098999620827867439647153561041391682379542100683088037607617223315764740840958261526426848224355025889644279450343876678638738534326070473698700339211216784448524089260075506181585681092365962189924608620425322026758153516442692617542808301743500345741063848975704006477903979434823328775649320359499366720697356481208053429394987363506105493113646430248901169187119100401735780823264505455733284871455949800577606913638420268492579251528137358369622772233511493205763053156987509063999932451032045474821058199322185871070722192993151027780723592100457705825283757239241090705312887088468730096558447766348615445175413899107124372283991709259275429044083615786304794079327895234367225837396508815728881157925325526748022270622640717923520977720460641336805340708857518236984907996838977066581228015513801450442336404562079055603890040776984296533564734207018906599923282228570061917557867118098980495670856711126197841478606226883704248939638079231928085357218250615806118999151469782192971102331540309449834336206436149602928007360948802979380484172200925090651285675768711693649154190442500393L, 
732040229873269167998151725931694827953681634389003321715808630661561448917386847102680173890493372907563669096770285785096869596774883746467330460640100024571112070164181404718293888556756948010101240858221166059921781018106587403626095940013486431225056790000782144631692909661325434107368044457259092807487442867867488114053305011936657480279272155674919829416951276674869976448104823803743993910989075908308597935942710167596420034130320199550790695889158007547279429932821494409538970258804888647694541657760428391945053883752482784657381045678692432834875031250858221771506322325686794399206632851806087587823775936506112969381501792945868268960410846439940168338290850725535081201727088569936235805714332620288223598091400533766424173333196233253753255894372023600901363123070686983695664737828536193304658652925618978309443139763877264731378506883830929790778686198570762765983505653706720125631930706428591406645101955188731334910936051250230556783240033908943832691762383615496834421924388554260884948797616851640588942337119660046065519346535162741665088822473037721583461125742016804849122610792120988692305906668907203201147776331279746905673813621404780201660410932535055086349235691658084926581270911409262672303635269L, 
918012828676885761564967778371533147054998190524798777673942507595577223954826598624343312649833609913161575105825678997028406328822475180240462023448198966341440533088837370565064571981064667066074478135725062484334202115966857492157808975326273520482843426416479764206168479622728456230113294096603890044710878950588230831980929816241434362062360106418876840398598889802758466850476970697042894313402257314895650495611585194715942891275669318999909224365242680401590841948050887179673354672801316059592738798720702038828880018522809243825225765805501366501306029855542303087261409176861064956400230478172217402998110566888096626533319810567200733520328951260074768281428081444684424796044231819706773679146643399264903917994345669418603038382866526703896838694401270084055699448780141397374618795933782963841727862710504075205332376864406152303782876266576597494787695876342650024935404894374374183827413184058567258955341777477104367878329518161024529335922564803473750037664183895995223277699286168088051814903297027197636271393735302187201348618028304036060056374217421054068586220160750542415530099096604123081187962020694329567039023107218006083234307730123634194767729379985487485492029075508780536121500578457806324795520431L, 
697528970333692600683082480444817282404922168962926886407513069519978994819868812794809696434044573194583591884163537210902568749498692834892672969739683385320796419779198273908795249945329477301458149351619921381466632275700166574839742205581394803041986118838666129026894530445150041136397761059612428386626805675082461466865798079886617610621339714806275057755494270699122358449409779371550113214402749725318590728509665322399843580842027084068289572749527647941388125799316737916432017090299591224227642643368120159857645176571500812207619929839010886697840433536414136429560959198983598089035625840141223534763205777531873083865255266365720955300690571319770859531340887067164393286345567961969421783862865220938504349849780615638525538025628550409862691208515704347170662515012981903315865267766722475209620402925434116236781805426849673578584224283048696683978206308755780578079091471818358160733850511345365205636598972579428344776777247872994467718965025586798141053365691238225136409720514481164838086542460893245626272768937962904327562442136695442321807281342422658326785051511128108197620612691091322965267294952119744312818354613108373466265021239714865540659778556125530730638295430968862923770744886070923312551618167L, 
902993979026152794571449064410077559630558572961135315258105206572699579180606188951945818530268822938644096241727335937038624313399739251583806576429826224421727315175173974767561844797143751193672068453435780737238356456310231596537769231950087367237945143075232000396933293088287056808691441260464779783997758689942438706244985514712156593525225255559221476269063098469773280962647369963117253382696225450249294833567603330860825407721941095995412718366258438899351981132847051340663219356811480588325450697506114983931103124697203928311360163415901439785362060409240013517966221312071085149475430376758857274070170212495406215534169945841793505978353034564329201853562145051904457760791956009818050162568964986876398677885472116275752770972570910015644297575871277893749802688308460670896704445035366692937155574458141908936965191624846516841701902556865715673634387864769598536083121674784803633132354238193819505850030734937646560824837141504650063470734289523055246831742160145002292246771751193504939512728170575487192513044023570731731101255502021792578479299393329603241928166583026924263379257040399383262584548251344282575081870541351522929062278255489926856654164437862560235486562832197913847752142878143543521469772133L, 
637713357029235442088224920024534812746442677150160439651969937755628287271293642744578515659594964670424524762495008022474319703832395900810411130731964254051536178783761855059617215118370686727348608786694942604514259618925484767777245154451780648336176876654488348157701879157668037466079690454726894108856732058949755078419307071261982343155248091821050366928630629549375578222489558536001479967562583941962586641484399797533874468364143385282120148031424391775301061346369801046761737216692518299388794478664245171048535294816285841301199006923462769115925544584646526190638731986560543599373408691254151624163600229594121170708343282737394649273028664714732841509945189841726058677179382754554321955376070031815821084544642746085892388367660766463714193975157372878116475079638845336749615771470838542716931496546551348835481290661381293328964175331203449284029033708371608345109744601393251051313091808358133249118754208111002238842667850955486749755816783592624135662771886133172077068654472795457276638042727153456362715866670630091872639181550476045685768753198239098226017731469130634993975433109227020417296762448568931924760548950031290165791844197383593206191881558360185075286636908852218168158665552287963162582418087L, 
679725553495692097055188746479378274610077340382503706864660262043462738398105738733072322227968097439997043805049531865638157225392251637956141859479191876716762134249371704917508585929474390930725798682086594632448397147580992333199550835969189748926980534830124173618637797175799202739742952625945995888611706933956895907614794990017246469414361629238619349111520590509734288588568175472536121301369067775699959452654841517755962239421313770161975250627933579219292171597003755034358898095225997731489907697970409804158172074953183036935857930410577684301018872461011699450580829537478968721307010240257727356815106550264959657723300269698247739613573055759953803178682649328021024713687251374936952189874841446690899392150051156421689968897177659437306062100201310541013316038954530809863200862830349249259174628886689712443374629027137831338309664911030911301804868600415072541129918587053762905376792794954033691193485589600966982212876779207478120670645778529164843281839967972961973482093772462099082234604483825144700930899657516441608993746466876088126030820949721748474527824931104672130880921451076361489943610905055767544468829533852293036942374458872589115478729696231797746313358876150858263470473804294210525985399587L, 
824398413774647721727783716927493082045125284132124780509061235986524002058349936469190938473386015500456524825409276125648471053181975803449490915100666247016637652642018232488976785632063759072275018626406774944471170385450728440926459151058085006131652296610735971351893924703077263224679691785651786207843470837599105545136455108194073888837826753658074939113627149636241476392069177004712810205870993425082267842272583580784209957497850548958876740779253563871955740945698689015009783725280097833652493493784494517786430543318863431142980228743033446229772717350263665466084337237755069330646207279722842832786260828796593072097441484504891123838156907117671605134678908865404730036641844036620212310974888253409088856675954684304757689293628813768192836788282269513146761832929988661554746373689847317340366396866412323941539395026032278994192965796569864688073297700421938866348921619002469012811505915273272178729567824277621832481807786501538339778508269937544558706557426115874178163656290807469059414238681684898271080996217230818536221294295334155280858291434356747355104259698639068675113188599299112659010002288146665619274233052748169090316523290096402431073377138230108275257100681182632341698425189779439335115225021L, 
729789110508282204924893011258715520278751671687930384199193768543357601768084873889493115025800388224832843218786460165314840811338505417587264046792952570764387223286784583459569482887062713089214944915149570406576083159305215127017051588810499899650667516555279235788055597156293158793019814043872646776963824688481962359780666149941126071039586394756747251899496964903875617477313254301983271088669790588656997570527692977956668563487909028235648471392692637876472813771963048739143583004605016690211332342543669239673821627439682197444193531036548894413367846891774313089849981632141024353607305158600633456256380283477196347358175973827038360775988583000208569450943718962348337500874904551855936922849242633949735534461539165391197984784388866306869280751861676004869456787539332465732952694904619709236048375305764211452224989381679509951562521023586908203322943746942279517534848827989468052041932242333594323142252986910685128884925321161926954494389490093703641131028790201914333053648116079609960799828055524608860528550284537066025459717879544744845265426180974986091119083317611702878408906728228016903382233168414974020874410892981023660661125861749370815095865411536976287077103479819047453736690867895011612015313151L, 
816674992130389032056684881758504324994741295893764524528735965439558170716583180201947457501093111915836314988083504433098076478277100665654281738322481980867001542691600865433419860809776399073137046557241004405720515256017605620836000306804591208524756797598751458541862088949858880183920808500357973572594626040160358949711493106356491604607347225574163942177300795695366017860893672512673200713214179547881276769366887569837432868130564300369717577242934355587844525552830047771388560362692424367704608244865509875013393483559756834314041482079743519792609527801978715898501316305998095493440403456659851292564661210165570296860186989082512706711082543617317867742483399049841423548709609238640506029837652787393512975610588516108375394543304683880447444977547346361387037817011070461785551507566287682589449032883016824598981590777091920838584825248802596884129802578165733664235473821247157696087170083274408746643554913326941373270166591300097209235125783777805432539234785578945347765477797282293150110551426072765008115650115577485169689283783317194542813291146028589925471270548460350947303644115327424336947520702232543581483078251784649275590063837533170244707770954428006625422467270920279852011094293244673890672908577L, 
759938787879624387431718533039360054979046947448290625789420009050833847666859111649616324082646479974093218877661133347217843575606591256924976158166797938853446083685455729154676968137468645102647252956542338070541358373675589600033932520025637258733828199205830800902832095460742534002576702858144211541328599629939166899674248245410568902611021391251011109076676759752009258558021718994759984791551131215851513113778972790839153447811284142434173922173626524980110773459678285988419595983259749265943552518791858438233814840067400877249790228547808384922588370859586159875091312467692949256161993274388574426389075607010788490031033367716883700546166413360107616037073349774383101808871526282816194762817866235818197217873191756004744629658365669514795015240660013101287066581189807300715862703523692958436562943634327895050931540270138560638883414974878614830015741964225184686776051068299711687540870916573045206800104858891525044549565274733740056349962463032216527708091819797202908789150092742723977264160515304991033017750645952911863480237295013238645459266507891379174138445942625432304571862385868893112132826370420791275911127530018539083874857650457277468684711816659384107368762804435889305016958497738372936463693107L, 
752725792565203045290835763014873980634374487534521673109134908502795151076690970369095008559923123198912263710225073466384799749282796913627097305706921002650759988480945389216725243597948847048687998482070433846555687136761778415257285906316335250360681304323154685810411097868440482518720920498241857390954880428555460606662786871432188413388588509100615296656371130776407570719887898769657588305852332203837127965724271419954919048920627794744968863385620952147456196060071028268265332782742517614164013119771362210086159871300104219751474438638986812295912995147333138619926397495984054715761434145633522872090604610269471727847853269814418018022787615988517602258489851188405128706947702859591151455210078872016091633358670357343200339240755230889698448644462828753660330223502121402952257996949403559494445916235568181644138693874462381514311154367650781028573326602986209768530130332976699763602369916677224004020785385261608074513821389294024320851054664795110030451329954890745510762854566890001963785394156776399484297937470115162801591923176408615186872726334802479286062083693529044345607651277027730504774582186824628801110249836018448240532337076805332522691243646152841062833225987429474663197782735607557141571300023L, 
720561489798591812564403361838257036222707383387451856165685945019392262690053420892860760303986057026810899904046358643772618389964339614205794327691784707321469475621268949219536565115355184804649507324791511390526818757344398013393456355043853918053835738935119670358019403999989820801307586006109442868304778618109046476914838773175443658759579132228638208259522293920308976858895571450571211614568832400261381882211382604087572233135100166692957108737322889980362397165422712642857617316934411826857761508194058414062927835185476347737466160496001958764694115800621879199913365706255282243029578027911242317043359784440997774228540555739178808591553589954506231143396001284660763894848598277416555434345561300264847822385185204266750072562077217576459251292028365511187602130810388170699200505592853255066772814715626721620475056286048189241280372469847190983428444098429279145445275616401776211939041575710098393995227108309390623458665396284343369086966675763831728064206186109579730952841853746539682219632347603551614831886656642328902425175803211146750834059412663311786892609164267913252583968957369138646115139870113083702238493768612537889256725244763349731313034202241264452609132011221905724392521576177587485164574729L, 
875418977056654417495720733261610830891222626826418812257914699676665788242586041093055067355848945337380404446506988770345266518988408506531263667859217530010119660248938854364898861362981292822645180629378103680376283552883787677479098111714733809153553434595373277140379578024338123407559771191763139215668248804908737294529818258368006397844794974845627060489846799759905683624412434482671877901086911285431364493484410638909469311342399930760785645050199795589731631830674679659443252091247978378181332414130788381975563819297331834913564685791634997359693643791838701368952028847280755863621032309273437267128496416116348739405362276576987022476502534068817454097055730773902961138242244039017438206691218421579515738985316379670360366255789737782040410554869596686981712590823390133214858627469835982602486316318223173340147921798907116472055606654019289028266871210404666941712104784364064571896684854589462096412812367942353718853051258035371729556280931627722333210387349331157727386163549644314950756996218296669469219939488808796294943472257586115436560323655901068252413550972962179072151465796779015360013292884210560834095824052044266602283627238489148835636475159989800930593574344818221106010739984370509297781603067L, 
796306344636225865510484196984606308162996239980541325165416373667002151253175110049992132672350921638305230449521651481343687741096542237782177423679826636254868750772310157697826171489786484281863094940503588175414608166196925204573390552816254902627212941758495604945234148832758887347045037000138156996061449987218191693752655181401775778306237891610650525530862961282792926660614066961685830225297291953736591013363830997124148063047702685080685054468966066483543484584938863268906898337531119982468573994443197416003823574278193941831426222517267399169256683349326309377109269260858216471388443599956684949593463000000906735182994379981312148104507351712601658405637242383421921919609902102562789920802443699838897012895689643686304627734951413089926204333232035187143684324148220063107114997374344405739865161965181859792876946814885641475380689459815982169649454469112451630408728138409002728477085113408375188429154035534349778823956592016335696435374491887009927713710863588473101181734498614619272678645997555433727138405786145423348207541396946746408566803038489701308486730163288185622075533052269039944474694769955057184909644062206169086635677829549839538678279192648597926436788336470360211837108390376894051583838293L, 
837987466929809772899504093483653734121811825838817678294038950187300414418124032909119323671221111660700486089033592165563631452516593084810324188345770080090354051577746756457041365884793455055992902482735812642995740148791318538142227148600340587127417613753927323396466284215487949670806555222255973785304184436149801786052174735767210213399873048503897428097225961093332699548988195769387989695357888035449210342534576944197016472506534320106426202125065361688932134345955185095462966588568101089397185402965188640828762423686146105199039606142574088114991330143618991634727192851318136878975460438549957886557223742719947137678400638773098601844199967600167217983413215522594198668439935934565493452393395049854673750165966859116089689571029957969730002321230931389219728447036136731502788597016866775676395290976488853879585126169471378375540221893683960598584203801043716049253533187034736497068049812361719146182684683883286925732275479284618152914653567884831289247290192877706305146186277158830975471414171545718950050537281699032397287763357806917505229973784696755360466224751270336200526002107265453404668600597942122556252120140903635713433344193500648272958647096189779239871692017218119092096192789133908482955701127L, 
710223632550820297982565071761251096048759589938385971702420678157712952676656628498062588502846673616275718982778362619623702582063838169334933227108326778807543895493536513730729893442666111473681243213518787054551581159683395579704930614330575884661677070306767962130668591010041877552720004695566810407134057102860290049069152724051669214085361961321830237658274114481461468076946561409454072972427634137775905970374162597969666997947987923693478634891070048038189034379556395057434289829783461764025033308252147680150446897849019510704552674482684728568268071317533394165875221072655643133671516995919076954215957783443281333670998439274913822296992169782682294698634344025901179511047441970810847566287334502357466218288672106966677063482477323411851555970907665146102943984246484324934128891366715780683673220219633783012384232912965102205389957495957069407623289751061248723827636968052930869410717008658371414206567702035515563168928701678646626836424513826968975771391868856248586338768427842674636227369882160171655973405275924805234385498146311369878940995489502514968241252364425108996903651636887929161312258608843943628805815317843782220051967888072333446974147688629760538398305359809705477801924872843338216934266177L, 
931722914920225180347704730257785384410304850217933353506093662779282465904699779241282135235281049497018308851115339832553207254680553201235971885910960490609001157263223002483227771026700078931090224016045066315251133590497667571708959687498372846309322554755288789862988477175736238818196479938177669890546773603135535622765371569071219795262862441824495271567154024773544683403240778927027588878853034340470370361568807848422762454900936678574075332582278916427243960191713275601755270149625519398104265305163155819413485485531093568493333077124317884096096385976028321230322919868299016213340201391703416649565260613880756628998763681722717027298544400534995506872921354825708994126758520281258549237741986431314869545442470788036019832313627774477894277335771564597860283991932911307806853768383025589921179651418546581491095797377283384941818171514154792373952359841449609408725002866208588352658018439043099902703428474714475672540231838302741534793973997601294595946931763956291264920887522200407011591251613342735434053250829307313912347676906597907825182764267240653625196198513979244253505595114608622572638177445741428596457761276845535801346542639741214663297124859033083949341712585992967864775631454607382013947125331L, 
951971255946690351073712482950939993065638893554821876462391579661435629298490691020311824679275174440016132000846095870817242413237237588517781254784760040052198559422741752298373858299495349563088238289185756765845601366116499800681973011306983196487853157289021686700984335816763878807200960414127371487721943449718151346311898719550831967230455751738751746108789321717383054900525900599830125204107803359938446094709881436210369845735132196473877026311614271919627265524130720470199581748469005938879959422538413593421104233535600239504856916073911373688274642089268946631546466845155708965395599594064300726374109222621226770089394068604581031325081609386475788547144716006011839776865988459052657422319706975741986409860797991208773935964680197718712254144406972919626397054122656470453148737677579882938380243002189250817208005564789050768544206508074812573106643388119374075395568799861724299385404925403084230846288525627876218628760132181200802276042064772012610952478142616360834445829836369740693119683401636152933670358698899263665789631680696761140002339197647890562878126651977256590421276229219493051341034203108108854977960754157316084866675345890193115282222346552546880011180045765268843823796061773088675470506601L, 
734060332431221158098980212919822250250037370431781268475475217704242950752418907445369138968143792058104682394336864870112877045169703922578752325070519622992263815963930608270486166274473391557919894742952457406970135830908446781471776408398539669965743398434497514560872739024497355497947100465499914399517231747328581194628106201805031302603398091088547820416732764333809264641558332664188970302798785997945199409946422300283525599025775433491390021646891410706008509374302992430142245795858660319700458985773972932150809521005804829909082646647871181771463869598014142663140031626418723507246911514341004505426090315885365214216377494491944985594722165516689739638020507896302159747318394042464292621162092999823840813316440097113022837143387132373725975888940461188878051172698568300736824929283063123758734182351605734288532137718859447349250334296494291868605530849197338680520157042046858176967318497238059602789258713737510972073500985782412423784969489830429101328601943244758054293094888255743335093990521722445615760020278998345382500240762923112780174851161924220621958203585474250449266170196641317443145616884712056655628532463727057647269171788389837047177217364843750806735021998794940784915510509219769003913395141L, 
628083197900318971945215283786551915551192273000430130555406005193625709944346303435693134436180094903923127009107503084113038468668978584754233435954069136955936590466728430128678312180532004100294453629214948925782801963201899282435945551967200589763405412044034307068861983953168267731484033229574763563628777707893569992741529247535416040804401553622100035079607124381465266534891864757931383584172915077629450677394787639803571837588492198446730549481127120511127195045199203072209270860513358175652673838859420777884290894540074765143687262860859688147041114029602070147486779282605063710442290396585352641963546336460497096943860631106848733704275524258412237015459875897951171814222218102607255249904018480505247171274737439436130912660182703112236017756718756887176223212505456577936847572318535658349948015425432153753135675054678652411451046391886344831659909358107929271964668314056723831227601539156931691089101896030988229530987188847575239307922423025491610338099809391006741773953906306882443684203893290625340695695606211460489785986949665047877503839352261013291107767948811754276222448935293105366173642579605624382198183281250159441246071672189118188936860162714532297289655548085762459568329982103731708571628721L, 
683235666280100563056467723506324759645029512334464190518228470162651154463288535722290966298872076307441204180460005400521699053677362879913930792307401098698418965478641292512997214394849753501180590810994948956127878973267753121707903770710053942270985186120104560434545514472555271586575445733798645850262794458428264529405669119899995080850621952890475757981033320159513200020632405362668085695415928580414545804912485436867644020930313587307945342920381507846018419909276662480856490175268098935234555376959894304032924405009827962661368822957877917988466213454589306675580786795848048379419341327070212742793923645518578607020928537276399128976291125448765708851104834429833343974552402151162595486770960592116221689319938616815747181128267295769177354684082058120647397725089141287204820140082598967490111069341627651956018230678371580105547880771475397792119876266414916723796258192932911070636259349135336598452052485794068746817059806507946619407517426557599199600734147258182352872982602995798248492372781676438120068254698018819737829290902530266285049952267015396201902828916558080037387239911603392358684386072186055897058330473220533316205411991288801679150974296797692863467931802355187326828908010833024352368310049L, 
971461376999781698498090920091369244779216918808758287948753468611463200398492011796975540113062715849906998827715677829708059942365848641776892420765668355773426075002024069417310376350152260472838540190562737371336889697913982960524359327682824489013480778518217841032073300295723867570344802827206032080876535945768216755869765775587226687508837698328268536462692360910583719114007539732811055211516652827906687884670377610427486685329063423058273515066914394647213045228259132502612436085244203582326969311636166551548451006366958716824872896931478081860503272753446715184865839440898897113544334455985835085543744501051855882544826949045674036800832086749853624329664026179808446590768180281905999104355415492424931212323563084498788636506091186573986945269267121667869453852199428287254706585390795181664374884121236563533466854200159188310323004564935488888027073552491611100912739633443197819229601155571640071503813018221442491537800573640863493343462631225231089243834464823410793047758798295668713964015923783018024437909534605990363387675210224544579473941425445951743202068303432869698517403987944921158629246142437395608753426640920671369212708849150509696646590447000982067803807091771644872785389662251395670145349547L, 
620760044169650979927385505660468360704688274100354375299267551715211628816248746342546216077194078460321164669751714680082751831726634625436649513912702945871365729124654798040681471041285760330147875177132145418624169140351740224056615787876626312168107945185302621726864341202760133127650792068081109935913498377167544494214201717592455381991976982640573041477439241962599114265376600970949493652910970816263068079903558141740549638646818729916685256194675103537000741777910234589742323902434048865804815770347281571784839192977582843405881264102536579265704677870345416456248895233749604007194802825561982316050524525909153397676826558014322031254830591673285859989022231549801344077137026808309922407591726123373075590777202682895534477762197311968123475817426979260725083222218225501047133406660007396663059371653507945439233649027828317510207545022400555129927146199730878396866726407085788920034824278434497598904701182223458967593998510659786912331034059130410269877009553336997309790861956357622731414191602703836431143099250821953885325428344801051074427752570436399708678547469174496222794226061899265225912632980248383138297553289508442560846517683785314784839276636207968083824232734200142755407792300095769053910737513L, 
642014400599379697465804506700334775234086042140773599164659863111238744638552618209759516633474939908206123543213691462810613852027206019998828835665817348326419012527251611997322289524502292340487573595616708439610146126111484488937151457301309118524100521984185076398867059062068945934488004177893558379041926925100014446520295795484253140114820417944910027877857980828611345211629886719961896810579145076498342320914089268964525606776758420980665375385933696637018741799896949697844114885433626601066215880322431746327632738808436023368807649417914775370948576659653731770765655324093931202533175830862718465751837467453949563647053877221398407521970535368109024744039554774900812158478786026894176025992702755207591908995431124937677102465253180688161977989952544313950938279675064020288459080665980360849738403437791638752304888339607270464486809242011874384119086399091095136043829138630230228353512883868638957856647269091421812565954803684917599732525929182115504890039116719157153212564748095601509063122716547895067958496657751686814512816452259572632651107790431970774511573641457050252733028075365699624865202858578294609897723949275809776495529757331013500355779304887641445799348570074395450291704796887926384846320763L, 
897638348615839579075219966901001543791453229633139123903077857841066837546064212391135612735049505339982622304724602888335232170399894988383309894554615175420180324528935468136474978046555116436768344525356088591538191061778295747142514228042656315132895905649142476279856351349919815953211762494460696561800666718443975118209243253717646728082564289116136624163258561217982083507959203745228810317576019091037956979627593644634215886690614113885539052997089833633020814900335206439005531577716281384539793634134502168721949192754406782114829442659676645009556438493020351539831588347890542777544449588432457706384390737116718954649279765611448825102996649549588112906931155838124856972434040961670373003974090503382329996479948021219683523203948197950616371272088352056535847191669905482371618644025944437337622216127876201788336181352156845816650427612058224286721069900446585831802090083635987030162232179852097763331565688347793751686723896486962718810471913793402870454195433911153333723025147207430112140672538406280368965196075022047467494485064182470938840256386526576860803729644293874093011772794099947741741357293140126751357718291491240693197976714978401743598562313440305682462379365766671467115386289197063506800409481L, 
780455814781524342921431739047876375500281779449288841199916654008641191225973901746442500447555355056375631054713663668012160032716436582991128779242634042809200682985887369731083666621375623134965114741072178635423227046196890788771943825552731927975134728616719519449831925948865946609225313659616994441356576298000206416084259678858738799569986206984562191591417385584836942624566016898428102136979712505761172033810887493260884274426634537872961730776786462463400375033382202142586527809053794526614836350542514410649357333596641798421185794221738367131329362470140206029911739459439316059667298338996275412156293534652831259828880386911052085772870884482186525771766543610837613366428269272173434453118938582124626584467367166852461663268026646332183001171241060973872737086464905439039760075638632319777667316840378148237511364748728853754998408855700070957170770690464316476367980304736141226702568606616550444213469737100063487105094586906846868920680536926452307205519278256032570367719319517271561575594650886597636846406954024410354584091428987508436367814744878739418385784349496019744762488644244145947869619005675284001294765821215165745904351606180468679563146554787630025974118446923097508056456868279762712349689359L, 
847494016140724344306315347776025970869001193045381558978376744566160945412248132259390427833257851431245293831120519428081507323959142900995793929193135935558854893907825422895507476311199024966637054314391610851997809721598117751292132792801287390467331962916255048656772286784494436530449079958474679560609852226595329770202007001591737100449218956341016650708672927556660109897835518493374769807470452110774527897260621232328035221177444855175680490392305456390025789520189948910728272738273931012089870491749515346592449567067577987354486532308214093996671553204616806760224471476986457914350140608177813596104756111943607554827057035432309175260636053446710583828099808009096765593518880110642598085983104505282370646931886482017509323514203762077537192034401582393193207356808431131691129564037023194506577910937543668963409143003823452345612158073775460739117103467677600360305253243508130684178107270576124916525695687390716680187097981991899572474714686391225060076423457966980175086070630710304503584758990856458097863889216610145550788482339535785844813113219429921251223932140314959893925669712307438253533353003347756806571941949833574255910543148743842325191648331955652126047653449421098580029747833199151883891996531L, 
665891112586826559049954342127915180128314480534741413063254782998787776757459238897123961888848510597953138932915716334786877237735684435209576872658994563614188301520444611349401823568301258500970527725875041142717981832911043494419707379464238507783252347243427392888023964027820888090135851006056734661514921295972641235210818466983330722924846916897172666899156258018408450172685907091872008969329513935051754326379587573756653913457848650837076020876896245050757217913200454181515498164885770441821742946509939782747075277644724854112272990486154081912054753421861240417289984056229127913063409897263209821710983250697563614405678141941603385429293368977972972099510897318644269811303591703168418855476222742574757324709363999314170848223880557261235150687811598985554705157134828906536338577544156427290074417113410806626256401214985240271050419340359559187825177444188908849216536904306335839540041165841642794662788197485297098902872029773850500428912104138099902415659928481968004742302501521424271477629344910165537691733976122875630143270089622981648299944019597617696424487177867520038745737128864512701113821407999279705913893510722978927529538717326358417317544583346166545443814773993871892663135813729590092881542297L, 
874383640962454644723743404614724629589046354724752941183583696996012440029627029269508795909905733361685084457892199671584355741519658429888490168170051486510847266975081046929798272979143879509521603266885975045887181983087481166866838417036845843193027038172136718914054043247509465549615412562201551477558987364507612279338762904750574046978754869353082332337496388795401980258147724190151587984135115562095252424274098075401233231124698705106384956430617754146915897404211986678947560045448087305648769696009675507896480856534009838215407984459280124784492084152899397133409194191645443909261868563780557294422024928761305926499717278944887101684759131206988628957688363595977064273486522357812525410185715172771302327408443308632054270815792777327976658586601663689798006625337264372336667685265057039006830087998519709046109538685358363492687448618346678939144746886244079967243014805910551297445505284817978257192235956280159879996736747181858862027618229447427119470565334004330760624094511206917869365619680851510392602358833272990966776256745549683606587456820872962137768560692432514967416486274546501470382151956935069608785629324005876802287075569627412520892477661238804338053566665049531018149185836825993103587121849L, 
829686127791954219071557272416895179934268795741698005112144294798496809951080061632485975608334908526199609881545358346224938808008792405669688987489566701305092547453024670531150949034363690002784342105303093035171411181656511943939785834416154234500032363999448326916574255752267360243141625435499653472369174519528997503339814895953809144018652626850260425938737248037693832642011979765255517896037069514564607280644907998908516906760124621292459131537370493272461244593459788603862303952722216174568230725387075604855438766229471869892181776278347830456498863242754842251213273389874488236093399837857565190001415401708635821552176316613570664330579127884668821178795079368337898573725287402986723780520263340434638601216396340818429298270554551741302352010500235195358407647471809967696491665073971492893961084538331831892465124353736824802883367583990090034282514158336604039033858535262062635255275282878113588313793099806796416474898843307542782457486701777900763195088556060587035231093049260741713109312283560931653033277525133626834392932443156908561151951682667281355147576411385469414370130636618762699853896551685035424298538979922165654032779088109130780083399871263003583661780553293804737698751996478073833295645929L, 
822463428062015993984073087117274452061889206226640905480549883958141595320270715251417297714794705623734667109572810291321881044042515948708060475515338037655799356498487536373643852031781728897543104141517483154537517091529902138030117592022555793864704764435491398059812909689622270087302002146924347768187450143638470098385283269449688283463880405974403889169092410854395382674821180660153180779225446664110689331485628076243288633837319655881463571161924541367563588463252268483906687131266388020449797238593943299493418474408635479029696836095733801356264862433008794725248332817048418734651849759520387179395770766517534585182466655660665716797940381651167570347987439573741149311193676930707186632711753787894443750965709637060619284939699508633459824446573101933226315396330080919279261546295125666699489334780646394440225811471656477897281701137147596412770464083556522105708518611541865750467823070781258040738280456283271303878982613166517182479629913973756904216462746967965212611081961545021072871720337553522542881505279548593341401340643780142546291988265336911649140286086092260305479875044911720105788662720254796015837415856334315498460587591076279370309682918883967268664346659449546565690850719121625421611035631L, 
675300635077398138879969930325504702692515028214932316208876906943214810465111348680663699108184325603896912257957179528056843749850137535447859887486752941692425808224859267878771036579254289238367055571599832872980355929719485345116162663786746616370756028824674919853081405599278629504374298094734239173052232826710949228319865483029300404078143507037206399202663982588539554348635836949067503824743491436160619478144909968074446884078663954014350865562560100019004360133150839934863851927746243768282545247371443435361268162473075837278452337428536888728469687962405368319644075618468777515682060423693626188277244606930384695950198855321653268193169647570748507747870540241042729823099136308673866629240425564466398808363743647835242085777282506774099382996306098665476742223707709594891025035683470560530171246043868991592627523971717730689410027612145043942568031239345961357430712981342953224587076532653403861743009297368524357195259303062239976808962238863043486447977198378138002904134152344826999873346076202067409644604084728833655786519999793984405832203516226027009069106816771355675907252773607276976397683106750041569342111082660025685786225158039115156685579871614115581529768289319585462897678895896799918734194207L, 
660694799120831355185174498779935431878292070294234139051502027434192550339059232622766175176754523255637897831431546859712107797710240754477929701247127783809388654248555763489295134377325046168508771181854435619241453905817869510704872130056073504031998114925042358471632193269166290637967839319159866528685377771223509604424507262288467935488671014400442277160620726566819425207719167154945367453665119453827714380498382228146163754005663101917860264320329094524521614144540961994484593133596858726135012559081957495277406885436893251832514899065425890956141135073361497991451140169163125084372424006611077191614229539094074511073729159526614281356843100946691455335152315128385490476943539810483745691945595113689944346110195595392178147174672470949346790621761680122808099660306105876541260376560084578116789049655729125504841212086835726183899062416407495559554399763500008063105745428441256791342541710857922308632682409533479707986683925459892913600260939125012683077568011861651458863783300540772383128970586915747463270980750005362437334415830760647182489854453251148767848842332073606154655785505937386924343883881157991687946637457664604839219752390354033401813970610720968477304051307054208056339829279665782521750351717L, 
705023257802727005547308504345187642048282472250258605582277824545455597322043420032071751001740055493195896310560621670162883697028046853448785081717283602029389291082341646093833402006292575060986172440755845968935569504187161049298523386418997150214705344925253384313037402649360645597050823834594763828701169535743510789759735676306697145609391551204179788217678976836916798118442653461454715306669113452427621461713578943982887483020726287701473914991006663930410596936997158783913976901492878852976455998071726089306724171341046145909615467636368691864216045229289043406156916856367367009587817876603091781766647198014805336045592651589786703772955507939774061587172213365566749373780695303137963169049871434848535061771153994618994410652988925068913728576578589419149737688119818434799575120374532712889693151679893120073288193318322077578731959474542570760413224069831546740991980813242853155006269559922549011713100355758852445235708664092372736795554983507280793905614522547968744035207468842410458620935554751169556921718821684654732821764385044854510917857972306303188988439735447210340540580440183036809810072969397392289584593506859347205098778545465305736775034894722508641296275994782431725331116142373036812350522387L, 
573689664030593987146015508808704192513236239358271710833373052909482344480112048805233615631400677471785212999444222207014761118637161199614899154728590689214285311773934299725087022163997964360364110147132369038631003575766730166102971913782086755266673549770828864786940741650837785273181425805451541036950569484935818557317351356866397370012320564741121310650397589860014958752110133744201218708826254384881698865542521273114930999931981984365658211155217903953411519531319244545120828618007513983705590110284492966609389901133724690961847811017475053457477013605616341865131522244029508813994867584947121269310272680221486906243673129119010405830994307111989431442908746994829271258808120945701206587842520173259325074354049828707210325876849067476227529656674216074265704589145268063782354368387782027969083140175549205607568171859254785030500744566729920858671382902880285671048717648481610563625856437441455927577484809912754111308449279382090444719368759680449046736477327618218618279843168617178767186752392229521775332846464473610185407647660096240718975077766132745399001788989849901446176313550121639464676353486172953201059017194623306500738251595545785653368756273051277545687619200033552495619648622117634467968440273L, 
789647435995678303259126159056610695365595405106501553692335323516588144697922467403998224261721041297727624590627633044060921982244322342511546043234459904853319125890774846845412347195950759749946255725764769173924272635099872683630615770113018230114189012129570650932005678753779559061004213133805975481953401060127914216290233491175046649588993913737207873242021705686915499416574325145412899425343025723050851084671216503241717475441109976341039023486846152785339567989681707241175978003140686963313687070757419965722394437892383263353653847356965669312382805285654034559118487868127949366945932081658310268085838122180061836786751819402161525419203969755683895098486793864996456944161318820116674567616721142365938591706521740867257195933038407142317836077398825768428955593191524119099156616010717979212917653672394815979148559480553016837098893444420076102767180494516307349769406328743253307352387771559398404672153746271854796310311576409080905324703024372669612356798490612566296272210575472561200973669685919244439413021485273780088406163407270314428202844905393975945840847404914843147619266138308333991552964755756132259354156745427253478443928936282153013126114213301725478156161057715554560627961550432102337531270769L, 
769755017479028144303888134295991180383608388188030827216296227588821146761386848680817543327860871292478380573844995762557664090583763738936811988150862557912291703917096334758541845562422948317188333764600762903631753831045397196992134196060002698551090866447236864583330128796575728331537786980022229238184921647056471108653372219204185204949717671755910839948165258350331091233704724417782629275024357287300047290520163862253872691639823101845222568191523907963717162705981105517532803676595722410592987367351872943370852304221705302656085509296318525690593624230425313496749323697911948965796208752716920616491027655301997225755645584022346410037457101029755375877180748428317228971226686132049748748755803884277964563572333652704052594625636133711354076229586571141232078038540110048235381643667965641575085582688573278160428644843885269524355945407552232688106165300740876679766404548294822842976774424174931379218825554061322759529234169735300108928011843623108886612073293845945683171102981121521886049924058028715874813296502972308919277582406344056477588306473212277675023556470927948370649514441395142120174487455438585337505061924099673349394672896237377190493879692645853725988223515315771476117272947589616395165566129L, 
710525456170979381040349396532954620832584105055726297895434372444491813324788344572366868553371039299300230907994393778457232487388393870191223014136193706022873460322966552140622558152682374810867803046546965630598142517399790360865822520183137748917520389396764867179745805574111310393472121226049708244115603357454221010271932425451904852853167301627119824833172367170050490637051561704971566307934135966041721856419067482259104366934142774984809483067097374030562737304683405299665285422219974200063207502194665103404263025917603302231814400659711116592995523289321514230038339139579927912384191311799835791720020187139373602462647358821711398044544391711029389892704662846015630257163512343763859253087495577646388450035582563769161815918776767568459653946940077180491599161515070386715139426340125976652148012137950966283592095856736990992535397937419452828846801253873435335243323634066199404960532538559262935260349905628279279731353630297684116797606807652434858589582047092149430321332909244683856249301781654546563232569775027163032953831199719977341581846945249904156437331525148146833228518407003340160255366741422046908294135202443960831915918690833348057908561703403274420126708481945089873021569665945448489377484229L, 
739839585721859932024821974659802818246350095577021028722070497697992093090732548739970512870972965839353444198801405450934236455012221561233075955789930612989167256692317380130951181653618179035222433341765339991201400716279932318726348368090267237750947367394890944400689726374431104320928849864502631577278694831524037239405932434164293787483335461325119681699640693650976570428980641180126546888249931670813664543259138082263179665644560633511335111693647973405960242063330916198684073558254826945768126988360020731369428524256080902880511414276031463258825950278564403393005370687726924022247621179708485326078505735239686755897521125294048259872869465173638510853896151099848373239547748595257565936212731082912791239322280523222755065761982158611398006320740086219825291400899025913656683196576375099224883815926019655707080425590536851688350816916827446820724512045014338440296201140522022435374756958573079636624956229303307585082759155773780488584146855769313917828200057310340261351789245945197952479619876522240704960394198993552474998570388929449198875186955551005155090403474320572615498559252285930772855234572604570211579127654554281447655315044911434076214217549171973647716644279057521942724836412386347848056558773L, 
904806466130350095823237738235466565149116419740115167615252013429472870718453764334928944989974446264893106587283630380182954993862849950489048986066518895370097604215302292332563539757730856505933359247694335310149251519953664617696895694544085573697022732642590826174310187509331268045480294214434981811362716592092993321707192515812121686510290284582366955860544463731925202227311840278420403876107230233437388785821614716401137069654378913884512185526583835507621984455888670122061931153812101571563153522648443415193602390689397249596266332328353726173451118451175190049657108642912589626791964966721217115551048530686626712103861216604207316071929631898944954780490042511015644951502302614054658699036923639288644960410888998029401057429550397929941543644918267221440356215667397022577333794906282558876950898529353354297283076823480396034993972577166002646416605909232435919989511978628610231785190235296819294579045526577630720099104975244440561011411050236266533715307102642502010171914671647448296205723806421062781563889905030132932204525452606542275562200281824146465108263417565448858217721520951606206451739464993784659860260224920895480582558529039545949143318246424863871726730127295811753472568791306856475788466227L])
class Task(SocketServer.BaseRequestHandler):
    """Per-connection handler: a proof-of-work gate followed by an
    RSA/RC4 fixed-point challenge that releases the flag."""

    def proof_of_work(self):
        """Ask the client to recover the first 4 chars of a random token.

        Returns True when the client's 4-byte answer re-creates the
        published sha256 digest, False otherwise.
        """
        alphabet = string.ascii_letters + string.digits
        token = ''.join(random.choice(alphabet) for _ in xrange(20))
        target = sha256(token).hexdigest()
        self.request.send("sha256(XXXX+%s) == %s\n" % (token[4:], target))
        self.request.send('Give me XXXX:')
        answer = self.request.recv(10).strip()
        if len(answer) != 4:
            return False
        return sha256(answer + token[4:]).hexdigest() == target

    def handle(self):
        """Run the challenge: collect K values, RSA-encrypt each mod Ns[i],
        then chain them through RC4 blocks; the flag is sent only if the
        chain returns to the client-chosen start value v."""
        if not self.proof_of_work():
            return
        self.request.settimeout(3)
        try:
            self.request.sendall("message: ")
            msg = self.request.recv(0x40).strip()
            residues = []
            for idx in range(K):
                self.request.sendall("x%d: " % idx)
                xi = int(self.request.recv(0x40).strip())
                # RSA encryption of the client-supplied value under modulus i.
                residues.append(pow(xi, e, Ns[idx]))
            self.request.sendall("v: ")
            v = int(self.request.recv(0x40).strip())
            # RC4 keyed by the client's message; keystream is stateful, so
            # each 8-byte block below consumes fresh keystream.
            cipher = ARC4.new(sha256(msg).digest()[:16])
            state = v
            for residue in residues:
                mixed = (residue ^ state) % (1 << 64)
                state = unpack('Q', cipher.encrypt(pack('Q', mixed)))[0]
            if state == v:
                self.request.sendall("%s\n" % flag)
            self.request.sendall("fin\n")
        finally:
            self.request.close()
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCPServer variant that serves each client connection on its own thread."""
if __name__ == "__main__":
    HOST, PORT = '0.0.0.0', 10001
    # Bug fix: allow_reuse_address must be set BEFORE the constructor runs,
    # because TCPServer.__init__ binds the socket immediately; assigning the
    # flag on the instance afterwards (as the original did) has no effect and
    # restarts could fail with EADDRINUSE.
    ThreadedServer.allow_reuse_address = True
    server = ThreadedServer((HOST, PORT), Task)
    server.serve_forever()
| 1,069.263158
| 79,121
| 0.986267
| 335
| 81,264
| 239.197015
| 0.525373
| 0.001785
| 0.001123
| 0.000412
| 0.001473
| 0.001023
| 0
| 0
| 0
| 0
| 0
| 0.980997
| 0.009254
| 81,264
| 75
| 79,122
| 1,083.52
| 0.014271
| 0.000209
| 0
| 0.04918
| 0
| 0
| 0.000972
| 0
| 0
| 1
| 0.000148
| 0
| 0
| 0
| null | null | 0.016393
| 0.114754
| null | null | 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f64dd9f5cb5cf171b9c843a08877dabf687a42e4
| 7,528
|
gyp
|
Python
|
ui/webui/resources/js/cr/ui/compiled_resources2.gyp
|
zipated/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
ui/webui/resources/js/cr/ui/compiled_resources2.gyp
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
ui/webui/resources/js/cr/ui/compiled_resources2.gyp
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'array_data_model',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:event_target',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'autocomplete_list',
'dependencies': [
'list',
'list_single_selection_model',
'position_util',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'command',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:ui',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'context_menu_button',
'dependencies': [
'menu_button',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'context_menu_handler',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:event_target',
'../compiled_resources2.gyp:ui',
'menu',
'menu_button',
'position_util',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'dialogs',
'dependencies': [
'../../compiled_resources2.gyp:cr',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'drag_wrapper',
'dependencies': [
'../../compiled_resources2.gyp:assert',
'../../compiled_resources2.gyp:cr',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'focus_grid',
'dependencies': [
'../../compiled_resources2.gyp:assert',
'../../compiled_resources2.gyp:cr',
'../../compiled_resources2.gyp:event_tracker',
'focus_row',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'focus_manager',
'dependencies': ['../../compiled_resources2.gyp:cr'],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'focus_outline_manager',
'dependencies': ['../../compiled_resources2.gyp:cr'],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'focus_row',
'dependencies': [
'../../compiled_resources2.gyp:assert',
'../../compiled_resources2.gyp:cr',
'../../compiled_resources2.gyp:event_tracker',
'../../compiled_resources2.gyp:util',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'focus_without_ink',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:ui',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'grid',
'dependencies': [
'list',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'list',
'dependencies': [
'array_data_model',
'list_item',
'list_selection_controller',
'list_selection_model',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'list_item',
'dependencies': [
'../../compiled_resources2.gyp:cr',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'list_selection_controller',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'list_selection_model',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'list_selection_model',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:event_target',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'list_single_selection_model',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:event_target',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'menu_button',
'dependencies': [
'../../compiled_resources2.gyp:assert',
'../../compiled_resources2.gyp:cr',
'../../compiled_resources2.gyp:event_tracker',
'../compiled_resources2.gyp:ui',
'menu',
'menu_item',
'position_util',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'menu_item',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../../compiled_resources2.gyp:load_time_data',
'../compiled_resources2.gyp:ui',
'command',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'menu',
'dependencies': [
'../../compiled_resources2.gyp:assert',
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:ui',
'menu_item',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'node_utils',
'dependencies': [
'../../compiled_resources2.gyp:cr',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'overlay',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../../compiled_resources2.gyp:util',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'position_util',
'dependencies': [
'../../compiled_resources2.gyp:cr',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'splitter',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../compiled_resources2.gyp:ui',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'table',
'dependencies': [
'list',
'list_single_selection_model',
'table/compiled_resources2.gyp:table_column_model',
'table/compiled_resources2.gyp:table_header',
'table/compiled_resources2.gyp:table_list',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'tree',
'dependencies': [
'../../compiled_resources2.gyp:cr',
'../../compiled_resources2.gyp:util',
'../compiled_resources2.gyp:ui',
],
'includes': ['../../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
| 31.898305
| 86
| 0.534803
| 636
| 7,528
| 5.984277
| 0.128931
| 0.231739
| 0.270363
| 0.177352
| 0.877299
| 0.858907
| 0.807409
| 0.807409
| 0.79217
| 0.756963
| 0
| 0.013846
| 0.232465
| 7,528
| 235
| 87
| 32.034043
| 0.64486
| 0.02059
| 0
| 0.599138
| 0
| 0
| 0.652599
| 0.479034
| 0
| 0
| 0
| 0
| 0.021552
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
f669c417576ef2e5bfa2a4a46dbad98475df1f17
| 5,233
|
py
|
Python
|
network_construction/ui.py
|
CheShangkun/Project-KnowNet
|
3544b3632a521e5d384cef113ee8421c73a20a44
|
[
"MIT"
] | 1
|
2018-08-20T18:11:01.000Z
|
2018-08-20T18:11:01.000Z
|
network_construction/ui.py
|
CheShangkun/Project-KnowNet
|
3544b3632a521e5d384cef113ee8421c73a20a44
|
[
"MIT"
] | null | null | null |
network_construction/ui.py
|
CheShangkun/Project-KnowNet
|
3544b3632a521e5d384cef113ee8421c73a20a44
|
[
"MIT"
] | null | null | null |
# encoding=utf-8
from pathlib import Path
import os
from bottle import route, view, run, request, post
import network_construction.network as nc
import network_construction.database as db
from data_platform.config import ConfigManager
from data_platform.datasource.networkx import NetworkXDS
@route('/construction')
@view('construction')
def do_construction():
    """Render the construction page for the graph type given in the query string."""
    gtype = request.query.graphtype
    print(gtype)
    payload = {'graphtype': gtype}
    print(payload)
    return payload
@post('/text')
@view('create')
def do_text():
    """Build a text-level network from the posted form fields and
    render the create view with the resulting node/edge counts."""
    database = request.forms.get('database')
    print(database)
    db.create_database(database)
    db.flush()
    source = request.forms.get('source')
    document = request.forms.get('document')
    node = request.forms.get('node')
    relation = request.forms.get('relation')
    nc.create_network_text(source, document, node, relation, database)
    db.flush()
    graph_location = Path(os.getcwd()) / 'data' / 'graph'
    config = ConfigManager({
        "init": {"location": graph_location},
        "file_format": "graphml",
    })
    print(graph_location)
    nxds = NetworkXDS(config)  # reader for the network stored on disk
    print(nxds.read_graph())
    network = nxds.read_graph(database)[database]
    node_count = network.number_of_nodes()
    edge_count = network.number_of_edges()
    print(node_count)
    print(edge_count)
    result = {
        'database': database,
        'source': source,
        'document': document,
        'node': node,
        'relation': relation,
        'node_number': node_count,
        'edge_number': edge_count,
    }
    print(result)
    return result
@post('/author')
@view('create')
def do_author():
    """Build an author network from the posted form fields and
    render the create view with the resulting node/edge counts."""
    database = request.forms.get('database')
    print(database)
    # NOTE(review): here flush() runs before create_database(), the reverse
    # of the order used in do_text() — confirm which ordering is intended.
    db.flush()
    db.create_database(database)
    source = request.forms.get('source')
    document = request.forms.get('document')
    relation = request.forms.get('relation')
    nc.create_network_author(source, document, relation, database)
    db.flush()
    graph_location = Path(os.getcwd()) / 'data' / 'graph'
    config = ConfigManager({
        "init": {"location": graph_location},
        "file_format": "graphml",
    })
    print(graph_location)
    nxds = NetworkXDS(config)  # reader for the network stored on disk
    print(nxds.read_graph())
    network = nxds.read_graph(database)[database]
    node_count = network.number_of_nodes()
    edge_count = network.number_of_edges()
    print(node_count)
    print(edge_count)
    result = {
        'database': database,
        'source': source,
        'document': document,
        'node': "undefined",
        'relation': relation,
        'node_number': node_count,
        'edge_number': edge_count,
    }
    print(result)
    return result
@post('/paper')
@view('create')
def do_paper():
    """Build a paper (citation) network from the posted form fields and
    render the create view with the resulting node/edge counts."""
    database = request.forms.get('database')
    print(database)
    # NOTE(review): flush() precedes create_database() here, unlike
    # do_text() — confirm which ordering is intended.
    db.flush()
    db.create_database(database)
    source = request.forms.get('source')
    document = request.forms.get('document')
    relation = request.forms.get('relation')
    nc.create_network_paper(source, document, relation, database)
    db.flush()
    graph_location = Path(os.getcwd()) / 'data' / 'graph'
    config = ConfigManager({
        "init": {"location": graph_location},
        "file_format": "graphml",
    })
    print(graph_location)
    nxds = NetworkXDS(config)  # reader for the network stored on disk
    print(nxds.read_graph())
    network = nxds.read_graph(database)[database]
    node_count = network.number_of_nodes()
    edge_count = network.number_of_edges()
    print(node_count)
    print(edge_count)
    result = {
        'database': database,
        'source': source,
        'document': document,
        'node': "undefined",
        'relation': relation,
        'node_number': node_count,
        'edge_number': edge_count,
    }
    print(result)
    return result
@post('/other')
@view('create')
def do_other():
    """Build a generic ("other") network from the posted form fields and
    render the create view with the resulting node/edge counts."""
    database = request.forms.get('database')
    print(database)
    # NOTE(review): flush() precedes create_database() here, unlike
    # do_text() — confirm which ordering is intended.
    db.flush()
    db.create_database(database)
    source = request.forms.get('source')
    document = request.forms.get('document')
    relation = request.forms.get('relation')
    nc.create_other(source, document, relation, database)
    db.flush()
    graph_location = Path(os.getcwd()) / 'data' / 'graph'
    config = ConfigManager({
        "init": {"location": graph_location},
        "file_format": "graphml",
    })
    print(graph_location)
    nxds = NetworkXDS(config)  # reader for the network stored on disk
    print(nxds.read_graph())
    network = nxds.read_graph(database)[database]
    node_count = network.number_of_nodes()
    edge_count = network.number_of_edges()
    print(node_count)
    print(edge_count)
    result = {
        'database': database,
        'source': source,
        'document': document,
        'node': "undefined",
        'relation': relation,
        'node_number': node_count,
        'edge_number': edge_count,
    }
    print(result)
    return result
# Development server: auto-reloader and debug tracebacks enabled — not for production.
run(host='localhost', port=8080, reloader=True, debug=True)
| 28.440217
| 70
| 0.628129
| 560
| 5,233
| 5.719643
| 0.126786
| 0.06369
| 0.079613
| 0.02966
| 0.812988
| 0.805807
| 0.805807
| 0.805807
| 0.777084
| 0.777084
| 0
| 0.001261
| 0.2425
| 5,233
| 183
| 71
| 28.595628
| 0.806761
| 0.00879
| 0
| 0.810651
| 0
| 0
| 0.120054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029586
| false
| 0
| 0.04142
| 0
| 0.100592
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f66b8fc88b06fdfe9f56f0ece6139cbf763f6e2e
| 1,781
|
py
|
Python
|
venv/lib/python3.7/site-packages/numba-0.48.0-py37h6c726b0_0/info/test/run_test.py
|
Scott-Rubey/AudioSampler
|
10ba3e8f283dc92fb8472087ff4be8917595adda
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/numba-0.48.0-py37h6c726b0_0/info/test/run_test.py
|
Scott-Rubey/AudioSampler
|
10ba3e8f283dc92fb8472087ff4be8917595adda
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/numba-0.48.0-py37h6c726b0_0/info/test/run_test.py
|
Scott-Rubey/AudioSampler
|
10ba3e8f283dc92fb8472087ff4be8917595adda
|
[
"MIT"
] | null | null | null |
"""Conda-build smoke test: verify every packaged numba submodule imports cleanly,
announcing each attempt so a failure is attributable to a specific module."""
import importlib

_MODULES = (
    "numba",
    "numba.annotations",
    "numba.cuda",
    "numba.cuda.cudadrv",
    "numba.cuda.kernels",
    "numba.cuda.simulator",
    "numba.cuda.simulator.cudadrv",
    "numba.cuda.tests",
    "numba.cuda.tests.cudadrv",
    "numba.cuda.tests.cudadrv.data",
    "numba.cuda.tests.cudapy",
    "numba.cuda.tests.cudasim",
    "numba.cuda.tests.nocuda",
    "numba.datamodel",
    "numba.jitclass",
    "numba.npyufunc",
    "numba.pycc",
    "numba.rewrites",
    "numba.runtime",
    "numba.scripts",
    "numba.servicelib",
    "numba.targets",
    "numba.testing",
    "numba.tests",
    "numba.tests.npyufunc",
    "numba.typeconv",
    "numba.types",
    "numba.typing",
    "numba.unsafe",
)

for _name in _MODULES:
    # Same output and import order as the original flat list of statements.
    print("import: '%s'" % _name)
    importlib.import_module(_name)
| 20.238636
| 48
| 0.752948
| 237
| 1,781
| 5.658228
| 0.113924
| 0.475764
| 0.34601
| 0.178971
| 0.333333
| 0.102908
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081415
| 1,781
| 87
| 49
| 20.471264
| 0.819682
| 0
| 0
| 0
| 0
| 0
| 0.426966
| 0.116292
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
9cfafce9e695c0f6b27e9a5d5f2d867c0220506d
| 231,890
|
py
|
Python
|
boto3_type_annotations_with_docs/boto3_type_annotations/comprehend/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/comprehend/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/comprehend/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def batch_detect_dominant_language(self, TextList: List) -> Dict:
"""
Determines the dominant language of the input text for a batch of documents. For a list of languages that Amazon Comprehend can detect, see `Amazon Comprehend Supported Languages <https://docs.aws.amazon.com/comprehend/latest/dg/how-languages.html>`__ .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectDominantLanguage>`_
**Request Syntax**
::
response = client.batch_detect_dominant_language(
TextList=[
'string',
]
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'Languages': [
{
'LanguageCode': 'string',
'Score': ...
},
]
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **Languages** *(list) --*
One or more DominantLanguage objects describing the dominant languages in the document.
- *(dict) --*
Returns the code for the dominant language in the input text and the level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **LanguageCode** *(string) --*
The RFC 5646 language code for the dominant language. For more information about RFC 5646, see `Tags for Identifying Languages <https://tools.ietf.org/html/rfc5646>`__ on the *IETF Tools* web site.
- **Score** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **ErrorList** *(list) --*
A list containing one object for each document that contained an error. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If there are no errors in the batch, the ``ErrorList`` is empty.
- *(dict) --*
Describes an error that occurred while processing a document in a batch. The operation returns on ``BatchItemError`` object for each document that contained an error.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **ErrorCode** *(string) --*
The numeric error code of the error.
- **ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def batch_detect_entities(self, TextList: List, LanguageCode: str) -> Dict:
"""
Inspects the text of a batch of documents for named entities and returns information about them. For more information about named entities, see how-entities
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectEntities>`_
**Request Syntax**
::
response = client.batch_detect_entities(
TextList=[
'string',
],
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'Entities': [
{
'Score': ...,
'Type': 'PERSON'|'LOCATION'|'ORGANIZATION'|'COMMERCIAL_ITEM'|'EVENT'|'DATE'|'QUANTITY'|'TITLE'|'OTHER',
'Text': 'string',
'BeginOffset': 123,
'EndOffset': 123
},
]
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **Entities** *(list) --*
One or more Entity objects, one for each entity detected in the document.
- *(dict) --*
Provides information about an entity.
- **Score** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **Type** *(string) --*
The entity's type.
- **Text** *(string) --*
The text of the entity.
- **BeginOffset** *(integer) --*
A character offset in the input text that shows where the entity begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
- **EndOffset** *(integer) --*
A character offset in the input text that shows where the entity ends. The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
- **ErrorList** *(list) --*
A list containing one object for each document that contained an error. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If there are no errors in the batch, the ``ErrorList`` is empty.
- *(dict) --*
Describes an error that occurred while processing a document in a batch. The operation returns on ``BatchItemError`` object for each document that contained an error.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **ErrorCode** *(string) --*
The numeric error code of the error.
- **ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
:rtype: dict
:returns:
"""
pass
def batch_detect_key_phrases(self, TextList: List, LanguageCode: str) -> Dict:
"""
Detects the key noun phrases found in a batch of documents.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectKeyPhrases>`_
**Request Syntax**
::
response = client.batch_detect_key_phrases(
TextList=[
'string',
],
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'KeyPhrases': [
{
'Score': ...,
'Text': 'string',
'BeginOffset': 123,
'EndOffset': 123
},
]
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **KeyPhrases** *(list) --*
One or more KeyPhrase objects, one for each key phrase detected in the document.
- *(dict) --*
Describes a key noun phrase.
- **Score** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **Text** *(string) --*
The text of a key noun phrase.
- **BeginOffset** *(integer) --*
A character offset in the input text that shows where the key phrase begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
- **EndOffset** *(integer) --*
A character offset in the input text where the key phrase ends. The offset returns the position of each UTF-8 code point in the string. A ``code point`` is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
- **ErrorList** *(list) --*
A list containing one object for each document that contained an error. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If there are no errors in the batch, the ``ErrorList`` is empty.
- *(dict) --*
Describes an error that occurred while processing a document in a batch. The operation returns on ``BatchItemError`` object for each document that contained an error.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **ErrorCode** *(string) --*
The numeric error code of the error.
- **ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer that 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
:rtype: dict
:returns:
"""
pass
def batch_detect_sentiment(self, TextList: List, LanguageCode: str) -> Dict:
"""
Inspects a batch of documents and returns an inference of the prevailing sentiment, ``POSITIVE`` , ``NEUTRAL`` , ``MIXED`` , or ``NEGATIVE`` , in each one.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectSentiment>`_
**Request Syntax**
::
response = client.batch_detect_sentiment(
TextList=[
'string',
],
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'Sentiment': 'POSITIVE'|'NEGATIVE'|'NEUTRAL'|'MIXED',
'SentimentScore': {
'Positive': ...,
'Negative': ...,
'Neutral': ...,
'Mixed': ...
}
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **Sentiment** *(string) --*
The sentiment detected in the document.
- **SentimentScore** *(dict) --*
The level of confidence that Amazon Comprehend has in the accuracy of its sentiment detection.
- **Positive** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``POSITIVE`` sentiment.
- **Negative** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``NEGATIVE`` sentiment.
- **Neutral** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``NEUTRAL`` sentiment.
- **Mixed** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``MIXED`` sentiment.
- **ErrorList** *(list) --*
A list containing one object for each document that contained an error. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If there are no errors in the batch, the ``ErrorList`` is empty.
- *(dict) --*
Describes an error that occurred while processing a document in a batch. The operation returns on ``BatchItemError`` object for each document that contained an error.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **ErrorCode** *(string) --*
The numeric error code of the error.
- **ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer that 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
:rtype: dict
:returns:
"""
pass
def batch_detect_syntax(self, TextList: List, LanguageCode: str) -> Dict:
"""
Inspects the text of a batch of documents for the syntax and part of speech of the words in the document and returns information about them. For more information, see how-syntax .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectSyntax>`_
**Request Syntax**
::
response = client.batch_detect_syntax(
TextList=[
'string',
],
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'SyntaxTokens': [
{
'TokenId': 123,
'Text': 'string',
'BeginOffset': 123,
'EndOffset': 123,
'PartOfSpeech': {
'Tag': 'ADJ'|'ADP'|'ADV'|'AUX'|'CONJ'|'CCONJ'|'DET'|'INTJ'|'NOUN'|'NUM'|'O'|'PART'|'PRON'|'PROPN'|'PUNCT'|'SCONJ'|'SYM'|'VERB',
'Score': ...
}
},
]
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **SyntaxTokens** *(list) --*
The syntax tokens for the words in the document, one token for each word.
- *(dict) --*
Represents a work in the input text that was recognized and assigned a part of speech. There is one syntax token record for each word in the source text.
- **TokenId** *(integer) --*
A unique identifier for a token.
- **Text** *(string) --*
The word that was recognized in the source text.
- **BeginOffset** *(integer) --*
The zero-based offset from the beginning of the source text to the first character in the word.
- **EndOffset** *(integer) --*
The zero-based offset from the beginning of the source text to the last character in the word.
- **PartOfSpeech** *(dict) --*
Provides the part of speech label and the confidence level that Amazon Comprehend has that the part of speech was correctly identified. For more information, see how-syntax .
- **Tag** *(string) --*
Identifies the part of speech that the token represents.
- **Score** *(float) --*
The confidence that Amazon Comprehend has that the part of speech was correctly identified.
- **ErrorList** *(list) --*
A list containing one object for each document that contained an error. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If there are no errors in the batch, the ``ErrorList`` is empty.
- *(dict) --*
Describes an error that occurred while processing a document in a batch. The operation returns on ``BatchItemError`` object for each document that contained an error.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **ErrorCode** *(string) --*
The numeric error code of the error.
- **ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer that 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_document_classifier(self, DocumentClassifierName: str, DataAccessRoleArn: str, InputDataConfig: Dict, LanguageCode: str, Tags: List = None, OutputDataConfig: Dict = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
"""
Creates a new document classifier that you can use to categorize documents. To create a classifier you provide a set of training documents that labeled with the categories that you want to use. After the classifier is trained you can use it to categorize a set of labeled documents into the categories. For more information, see how-document-classification .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/CreateDocumentClassifier>`_
**Request Syntax**
::
response = client.create_document_classifier(
DocumentClassifierName='string',
DataAccessRoleArn='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
InputDataConfig={
'S3Uri': 'string'
},
OutputDataConfig={
'S3Uri': 'string',
'KmsKeyId': 'string'
},
ClientRequestToken='string',
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',
VolumeKmsKeyId='string'
)
**Response Syntax**
::
{
'DocumentClassifierArn': 'string'
}
**Response Structure**
- *(dict) --*
- **DocumentClassifierArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the document classifier.
:type DocumentClassifierName: string
:param DocumentClassifierName: **[REQUIRED]**
The name of the document classifier.
:type DataAccessRoleArn: string
:param DataAccessRoleArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.
:type Tags: list
:param Tags:
Tags to be associated with the document classifier being created. A tag is a key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with \"Sales\" as the key might be added to a resource to indicate its use by the sales department.
- *(dict) --*
A key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with the key-value pair ‘Department’:’Sales’ might be added to a resource to indicate its use by a particular department.
- **Key** *(string) --* **[REQUIRED]**
The initial part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the key portion of the pair, with multiple possible values such as “sales,” “legal,” and “administration.”
- **Value** *(string) --*
The second part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the initial (key) portion of the pair, with a value of “sales” to indicate the sales department.
:type InputDataConfig: dict
:param InputDataConfig: **[REQUIRED]**
Specifies the format and location of the input data for the job.
- **S3Uri** *(string) --* **[REQUIRED]**
The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files.
For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
:type OutputDataConfig: dict
:param OutputDataConfig:
Enables the addition of output results configuration parameters for custom classifier jobs.
- **S3Uri** *(string) --*
When you use the ``OutputDataConfig`` object while creating a custom classifier, you specify the Amazon S3 location where you want to write the confusion matrix. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of this output file.
When the custom classifier job is finished, the service creates the output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the confusion matrix.
- **KmsKeyId** *(string) --*
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
* KMS Key ID: ``\"1234abcd-12ab-34cd-56ef-1234567890ab\"``
* Amazon Resource Name (ARN) of a KMS Key: ``\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"``
* KMS Key Alias: ``\"alias/ExampleAlias\"``
* ARN of a KMS Key Alias: ``\"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"``
:type ClientRequestToken: string
:param ClientRequestToken:
A unique identifier for the request. If you don\'t set the client request token, Amazon Comprehend generates one.
This field is autopopulated if not provided.
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
:type VolumeKmsKeyId: string
:param VolumeKmsKeyId:
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
* KMS Key ID: ``\"1234abcd-12ab-34cd-56ef-1234567890ab\"``
* Amazon Resource Name (ARN) of a KMS Key: ``\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"``
:rtype: dict
:returns:
"""
pass
def create_entity_recognizer(self, RecognizerName: str, DataAccessRoleArn: str, InputDataConfig: Dict, LanguageCode: str, Tags: List = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
"""
Creates an entity recognizer using submitted files. After your ``CreateEntityRecognizer`` request is submitted, you can check job status using the API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/CreateEntityRecognizer>`_
**Request Syntax**
::
response = client.create_entity_recognizer(
RecognizerName='string',
DataAccessRoleArn='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
InputDataConfig={
'EntityTypes': [
{
'Type': 'string'
},
],
'Documents': {
'S3Uri': 'string'
},
'Annotations': {
'S3Uri': 'string'
},
'EntityList': {
'S3Uri': 'string'
}
},
ClientRequestToken='string',
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',
VolumeKmsKeyId='string'
)
**Response Syntax**
::
{
'EntityRecognizerArn': 'string'
}
**Response Structure**
- *(dict) --*
- **EntityRecognizerArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the entity recognizer.
:type RecognizerName: string
:param RecognizerName: **[REQUIRED]**
The name given to the newly created recognizer. Recognizer names can be a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The name must be unique in the account/region.
:type DataAccessRoleArn: string
:param DataAccessRoleArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.
:type Tags: list
:param Tags:
Tags to be associated with the entity recognizer being created. A tag is a key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with \"Sales\" as the key might be added to a resource to indicate its use by the sales department.
- *(dict) --*
A key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with the key-value pair ‘Department’:’Sales’ might be added to a resource to indicate its use by a particular department.
- **Key** *(string) --* **[REQUIRED]**
The initial part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the key portion of the pair, with multiple possible values such as “sales,” “legal,” and “administration.”
- **Value** *(string) --*
The second part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the initial (key) portion of the pair, with a value of “sales” to indicate the sales department.
:type InputDataConfig: dict
:param InputDataConfig: **[REQUIRED]**
Specifies the format and location of the input data. The S3 bucket containing the input data must be located in the same region as the entity recognizer being created.
- **EntityTypes** *(list) --* **[REQUIRED]**
The entity types in the input data for an entity recognizer.
- *(dict) --*
Information about an individual item on a list of entity types.
- **Type** *(string) --* **[REQUIRED]**
Entity type of an item on an entity type list.
- **Documents** *(dict) --* **[REQUIRED]**
S3 location of the documents folder for an entity recognizer
- **S3Uri** *(string) --* **[REQUIRED]**
Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
- **Annotations** *(dict) --*
S3 location of the annotations file for an entity recognizer.
- **S3Uri** *(string) --* **[REQUIRED]**
Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
- **EntityList** *(dict) --*
S3 location of the entity list for an entity recognizer.
- **S3Uri** *(string) --* **[REQUIRED]**
Specifies the Amazon S3 location where the entity list is located. The URI must be in the same region as the API endpoint that you are calling.
:type ClientRequestToken: string
:param ClientRequestToken:
A unique identifier for the request. If you don\'t set the client request token, Amazon Comprehend generates one.
This field is autopopulated if not provided.
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. All documents must be in the same language. Only English (\"en\") is currently supported.
:type VolumeKmsKeyId: string
:param VolumeKmsKeyId:
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
* KMS Key ID: ``\"1234abcd-12ab-34cd-56ef-1234567890ab\"``
* Amazon Resource Name (ARN) of a KMS Key: ``\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"``
:rtype: dict
:returns:
"""
pass
def delete_document_classifier(self, DocumentClassifierArn: str) -> Dict:
"""
Deletes a previously created document classifier
Only those classifiers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ``ResourceInUseException`` will be returned.
This is an asynchronous action that puts the classifier into a DELETING state, and it is then removed by a background job. Once removed, the classifier disappears from your account and is no longer available for use.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DeleteDocumentClassifier>`_
**Request Syntax**
::
response = client.delete_document_classifier(
DocumentClassifierArn='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type DocumentClassifierArn: string
:param DocumentClassifierArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that identifies the document classifier.
:rtype: dict
:returns:
"""
pass
def delete_entity_recognizer(self, EntityRecognizerArn: str) -> Dict:
"""
Deletes an entity recognizer.
Only those recognizers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ``ResourceInUseException`` will be returned.
This is an asynchronous action that puts the recognizer into a DELETING state, and it is then removed by a background job. Once removed, the recognizer disappears from your account and is no longer available for use.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DeleteEntityRecognizer>`_
**Request Syntax**
::
response = client.delete_entity_recognizer(
EntityRecognizerArn='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type EntityRecognizerArn: string
:param EntityRecognizerArn: **[REQUIRED]**
The Amazon Resource Name (ARN) that identifies the entity recognizer.
:rtype: dict
:returns:
"""
pass
def describe_document_classification_job(self, JobId: str) -> Dict:
    """
    Gets the properties associated with a document classification job. Use this operation to get the status of a classification job.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeDocumentClassificationJob>`_
    **Request Syntax**
    ::
      response = client.describe_document_classification_job(
          JobId='string'
      )
    **Response Syntax**
    ::
      {
          'DocumentClassificationJobProperties': {
              'JobId': 'string',
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'DocumentClassifierArn': 'string',
              'InputDataConfig': {
                  'S3Uri': 'string',
                  'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
              },
              'OutputDataConfig': {
                  'S3Uri': 'string',
                  'KmsKeyId': 'string'
              },
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **DocumentClassificationJobProperties** *(dict) --*
        An object that describes the properties associated with the document classification job.
        - **JobId** *(string) --*
          The identifier assigned to the document classification job.
        - **JobName** *(string) --*
          The name that you assigned to the document classification job.
        - **JobStatus** *(string) --*
          The current status of the document classification job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
        - **Message** *(string) --*
          A description of the status of the job.
        - **SubmitTime** *(datetime) --*
          The time that the document classification job was submitted for processing.
        - **EndTime** *(datetime) --*
          The time that the document classification job completed.
        - **DocumentClassifierArn** *(string) --*
          The Amazon Resource Name (ARN) that identifies the document classifier.
        - **InputDataConfig** *(dict) --*
          The input data configuration that you supplied when you created the document classification job.
          - **S3Uri** *(string) --*
            The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
            For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
          - **InputFormat** *(string) --*
            Specifies how the text in an input file should be processed:
            * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
            * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
        - **OutputDataConfig** *(dict) --*
          The output data configuration that you supplied when you created the document classification job.
          - **S3Uri** *(string) --*
            When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
            When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
          - **KmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
            * KMS Key Alias: ``"alias/ExampleAlias"``
            * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type JobId: string
    :param JobId: **[REQUIRED]**
      The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
    :rtype: dict
    :returns:
    """
    pass
def describe_document_classifier(self, DocumentClassifierArn: str) -> Dict:
    """
    Gets the properties associated with a document classifier.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeDocumentClassifier>`_
    **Request Syntax**
    ::
      response = client.describe_document_classifier(
          DocumentClassifierArn='string'
      )
    **Response Syntax**
    ::
      {
          'DocumentClassifierProperties': {
              'DocumentClassifierArn': 'string',
              'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
              'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'TrainingStartTime': datetime(2015, 1, 1),
              'TrainingEndTime': datetime(2015, 1, 1),
              'InputDataConfig': {
                  'S3Uri': 'string'
              },
              'OutputDataConfig': {
                  'S3Uri': 'string',
                  'KmsKeyId': 'string'
              },
              'ClassifierMetadata': {
                  'NumberOfLabels': 123,
                  'NumberOfTrainedDocuments': 123,
                  'NumberOfTestDocuments': 123,
                  'EvaluationMetrics': {
                      'Accuracy': 123.0,
                      'Precision': 123.0,
                      'Recall': 123.0,
                      'F1Score': 123.0
                  }
              },
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **DocumentClassifierProperties** *(dict) --*
        An object that contains the properties associated with a document classifier.
        - **DocumentClassifierArn** *(string) --*
          The Amazon Resource Name (ARN) that identifies the document classifier.
        - **LanguageCode** *(string) --*
          The language code for the language of the documents that the classifier was trained on.
        - **Status** *(string) --*
          The status of the document classifier. If the status is ``TRAINED`` the classifier is ready to use. If the status is ``FAILED`` you can see additional information about why the classifier wasn't trained in the ``Message`` field.
        - **Message** *(string) --*
          Additional information about the status of the classifier.
        - **SubmitTime** *(datetime) --*
          The time that the document classifier was submitted for training.
        - **EndTime** *(datetime) --*
          The time that training the document classifier completed.
        - **TrainingStartTime** *(datetime) --*
          Indicates the time when the training starts on document classifiers. You are billed for the time interval between this time and the value of TrainingEndTime.
        - **TrainingEndTime** *(datetime) --*
          The time that training of the document classifier was completed. Indicates the time when the training completes on document classifiers. You are billed for the time interval between this time and the value of TrainingStartTime.
        - **InputDataConfig** *(dict) --*
          The input data configuration that you supplied when you created the document classifier for training.
          - **S3Uri** *(string) --*
            The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files.
            For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
        - **OutputDataConfig** *(dict) --*
          Provides output results configuration parameters for custom classifier jobs.
          - **S3Uri** *(string) --*
            When you use the ``OutputDataConfig`` object while creating a custom classifier, you specify the Amazon S3 location where you want to write the confusion matrix. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of this output file.
            When the custom classifier job is finished, the service creates the output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the confusion matrix.
          - **KmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
            * KMS Key Alias: ``"alias/ExampleAlias"``
            * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
        - **ClassifierMetadata** *(dict) --*
          Information about the document classifier, including the number of documents used for training the classifier, the number of documents used to test the classifier, and an accuracy rating.
          - **NumberOfLabels** *(integer) --*
            The number of labels in the input data.
          - **NumberOfTrainedDocuments** *(integer) --*
            The number of documents in the input data that were used to train the classifier. Typically this is 80 to 90 percent of the input documents.
          - **NumberOfTestDocuments** *(integer) --*
            The number of documents in the input data that were used to test the classifier. Typically this is 10 to 20 percent of the input documents.
          - **EvaluationMetrics** *(dict) --*
            Describes the result metrics for the test data associated with a document classifier.
            - **Accuracy** *(float) --*
              The fraction of the labels that were correctly recognized. It is computed by dividing the number of labels in the test documents that were correctly recognized by the total number of labels in the test documents.
            - **Precision** *(float) --*
              A measure of the usefulness of the classifier results in the test data. High precision means that the classifier returned substantially more relevant results than irrelevant ones.
            - **Recall** *(float) --*
              A measure of how complete the classifier results are for the test data. High recall means that the classifier returned most of the relevant results.
            - **F1Score** *(float) --*
              A measure of how accurate the classifier results are for the test data. It is derived from the ``Precision`` and ``Recall`` values. The ``F1Score`` is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type DocumentClassifierArn: string
    :param DocumentClassifierArn: **[REQUIRED]**
      The Amazon Resource Name (ARN) that identifies the document classifier. The operation returns this identifier in its response.
    :rtype: dict
    :returns:
    """
    pass
def describe_dominant_language_detection_job(self, JobId: str) -> Dict:
    """
    Gets the properties associated with a dominant language detection job. Use this operation to get the status of a detection job.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeDominantLanguageDetectionJob>`_
    **Request Syntax**
    ::
      response = client.describe_dominant_language_detection_job(
          JobId='string'
      )
    **Response Syntax**
    ::
      {
          'DominantLanguageDetectionJobProperties': {
              'JobId': 'string',
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'InputDataConfig': {
                  'S3Uri': 'string',
                  'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
              },
              'OutputDataConfig': {
                  'S3Uri': 'string',
                  'KmsKeyId': 'string'
              },
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **DominantLanguageDetectionJobProperties** *(dict) --*
        An object that contains the properties associated with a dominant language detection job.
        - **JobId** *(string) --*
          The identifier assigned to the dominant language detection job.
        - **JobName** *(string) --*
          The name that you assigned to the dominant language detection job.
        - **JobStatus** *(string) --*
          The current status of the dominant language detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
        - **Message** *(string) --*
          A description for the status of a job.
        - **SubmitTime** *(datetime) --*
          The time that the dominant language detection job was submitted for processing.
        - **EndTime** *(datetime) --*
          The time that the dominant language detection job completed.
        - **InputDataConfig** *(dict) --*
          The input data configuration that you supplied when you created the dominant language detection job.
          - **S3Uri** *(string) --*
            The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
            For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
          - **InputFormat** *(string) --*
            Specifies how the text in an input file should be processed:
            * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
            * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
        - **OutputDataConfig** *(dict) --*
          The output data configuration that you supplied when you created the dominant language detection job.
          - **S3Uri** *(string) --*
            When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
            When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
          - **KmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
            * KMS Key Alias: ``"alias/ExampleAlias"``
            * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type JobId: string
    :param JobId: **[REQUIRED]**
      The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
    :rtype: dict
    :returns:
    """
    pass
def describe_entities_detection_job(self, JobId: str) -> Dict:
    """
    Gets the properties associated with an entities detection job. Use this operation to get the status of a detection job.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeEntitiesDetectionJob>`_
    **Request Syntax**
    ::
      response = client.describe_entities_detection_job(
          JobId='string'
      )
    **Response Syntax**
    ::
      {
          'EntitiesDetectionJobProperties': {
              'JobId': 'string',
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'EntityRecognizerArn': 'string',
              'InputDataConfig': {
                  'S3Uri': 'string',
                  'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
              },
              'OutputDataConfig': {
                  'S3Uri': 'string',
                  'KmsKeyId': 'string'
              },
              'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **EntitiesDetectionJobProperties** *(dict) --*
        An object that contains the properties associated with an entities detection job.
        - **JobId** *(string) --*
          The identifier assigned to the entities detection job.
        - **JobName** *(string) --*
          The name that you assigned the entities detection job.
        - **JobStatus** *(string) --*
          The current status of the entities detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
        - **Message** *(string) --*
          A description of the status of a job.
        - **SubmitTime** *(datetime) --*
          The time that the entities detection job was submitted for processing.
        - **EndTime** *(datetime) --*
          The time that the entities detection job completed.
        - **EntityRecognizerArn** *(string) --*
          The Amazon Resource Name (ARN) that identifies the entity recognizer.
        - **InputDataConfig** *(dict) --*
          The input data configuration that you supplied when you created the entities detection job.
          - **S3Uri** *(string) --*
            The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
            For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
          - **InputFormat** *(string) --*
            Specifies how the text in an input file should be processed:
            * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
            * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
        - **OutputDataConfig** *(dict) --*
          The output data configuration that you supplied when you created the entities detection job.
          - **S3Uri** *(string) --*
            When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
            When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
          - **KmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
            * KMS Key Alias: ``"alias/ExampleAlias"``
            * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
        - **LanguageCode** *(string) --*
          The language code of the input documents.
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type JobId: string
    :param JobId: **[REQUIRED]**
      The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
    :rtype: dict
    :returns:
    """
    pass
def describe_entity_recognizer(self, EntityRecognizerArn: str) -> Dict:
    """
    Provides details about an entity recognizer including status, S3 buckets containing training data, recognizer metadata, metrics, and so on.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeEntityRecognizer>`_
    **Request Syntax**
    ::
      response = client.describe_entity_recognizer(
          EntityRecognizerArn='string'
      )
    **Response Syntax**
    ::
      {
          'EntityRecognizerProperties': {
              'EntityRecognizerArn': 'string',
              'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
              'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'TrainingStartTime': datetime(2015, 1, 1),
              'TrainingEndTime': datetime(2015, 1, 1),
              'InputDataConfig': {
                  'EntityTypes': [
                      {
                          'Type': 'string'
                      },
                  ],
                  'Documents': {
                      'S3Uri': 'string'
                  },
                  'Annotations': {
                      'S3Uri': 'string'
                  },
                  'EntityList': {
                      'S3Uri': 'string'
                  }
              },
              'RecognizerMetadata': {
                  'NumberOfTrainedDocuments': 123,
                  'NumberOfTestDocuments': 123,
                  'EvaluationMetrics': {
                      'Precision': 123.0,
                      'Recall': 123.0,
                      'F1Score': 123.0
                  },
                  'EntityTypes': [
                      {
                          'Type': 'string'
                      },
                  ]
              },
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **EntityRecognizerProperties** *(dict) --*
        Describes information associated with an entity recognizer.
        - **EntityRecognizerArn** *(string) --*
          The Amazon Resource Name (ARN) that identifies the entity recognizer.
        - **LanguageCode** *(string) --*
          The language of the input documents. All documents must be in the same language. Only English ("en") is currently supported.
        - **Status** *(string) --*
          Provides the status of the entity recognizer.
        - **Message** *(string) --*
          A description of the status of the recognizer.
        - **SubmitTime** *(datetime) --*
          The time that the recognizer was submitted for processing.
        - **EndTime** *(datetime) --*
          The time that the recognizer creation completed.
        - **TrainingStartTime** *(datetime) --*
          The time that training of the entity recognizer started.
        - **TrainingEndTime** *(datetime) --*
          The time that training of the entity recognizer was completed.
        - **InputDataConfig** *(dict) --*
          The input data properties of an entity recognizer.
          - **EntityTypes** *(list) --*
            The entity types in the input data for an entity recognizer.
            - *(dict) --*
              Information about an individual item on a list of entity types.
              - **Type** *(string) --*
                Entity type of an item on an entity type list.
          - **Documents** *(dict) --*
            S3 location of the documents folder for an entity recognizer
            - **S3Uri** *(string) --*
              Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
          - **Annotations** *(dict) --*
            S3 location of the annotations file for an entity recognizer.
            - **S3Uri** *(string) --*
              Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
          - **EntityList** *(dict) --*
            S3 location of the entity list for an entity recognizer.
            - **S3Uri** *(string) --*
              Specifies the Amazon S3 location where the entity list is located. The URI must be in the same region as the API endpoint that you are calling.
        - **RecognizerMetadata** *(dict) --*
          Provides information about an entity recognizer.
          - **NumberOfTrainedDocuments** *(integer) --*
            The number of documents in the input data that were used to train the entity recognizer. Typically this is 80 to 90 percent of the input documents.
          - **NumberOfTestDocuments** *(integer) --*
            The number of documents in the input data that were used to test the entity recognizer. Typically this is 10 to 20 percent of the input documents.
          - **EvaluationMetrics** *(dict) --*
            Detailed information about the accuracy of an entity recognizer.
            - **Precision** *(float) --*
              A measure of the usefulness of the recognizer results in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.
            - **Recall** *(float) --*
              A measure of how complete the recognizer results are for the test data. High recall means that the recognizer returned most of the relevant results.
            - **F1Score** *(float) --*
              A measure of how accurate the recognizer results are for the test data. It is derived from the ``Precision`` and ``Recall`` values. The ``F1Score`` is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
          - **EntityTypes** *(list) --*
            Entity types from the metadata of an entity recognizer.
            - *(dict) --*
              Individual item from the list of entity types in the metadata of an entity recognizer.
              - **Type** *(string) --*
                Type of entity from the list of entity types in the metadata of an entity recognizer.
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type EntityRecognizerArn: string
    :param EntityRecognizerArn: **[REQUIRED]**
      The Amazon Resource Name (ARN) that identifies the entity recognizer.
    :rtype: dict
    :returns:
    """
    pass
def describe_key_phrases_detection_job(self, JobId: str) -> Dict:
    """
    Gets the properties associated with a key phrases detection job. Use this operation to get the status of a detection job.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeKeyPhrasesDetectionJob>`_
    **Request Syntax**
    ::
      response = client.describe_key_phrases_detection_job(
          JobId='string'
      )
    **Response Syntax**
    ::
      {
          'KeyPhrasesDetectionJobProperties': {
              'JobId': 'string',
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'InputDataConfig': {
                  'S3Uri': 'string',
                  'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
              },
              'OutputDataConfig': {
                  'S3Uri': 'string',
                  'KmsKeyId': 'string'
              },
              'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **KeyPhrasesDetectionJobProperties** *(dict) --*
        An object that contains the properties associated with a key phrases detection job.
        - **JobId** *(string) --*
          The identifier assigned to the key phrases detection job.
        - **JobName** *(string) --*
          The name that you assigned the key phrases detection job.
        - **JobStatus** *(string) --*
          The current status of the key phrases detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
        - **Message** *(string) --*
          A description of the status of a job.
        - **SubmitTime** *(datetime) --*
          The time that the key phrases detection job was submitted for processing.
        - **EndTime** *(datetime) --*
          The time that the key phrases detection job completed.
        - **InputDataConfig** *(dict) --*
          The input data configuration that you supplied when you created the key phrases detection job.
          - **S3Uri** *(string) --*
            The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
            For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
          - **InputFormat** *(string) --*
            Specifies how the text in an input file should be processed:
            * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
            * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
        - **OutputDataConfig** *(dict) --*
          The output data configuration that you supplied when you created the key phrases detection job.
          - **S3Uri** *(string) --*
            When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
            When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
          - **KmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
            * KMS Key Alias: ``"alias/ExampleAlias"``
            * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
        - **LanguageCode** *(string) --*
          The language code of the input documents.
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type JobId: string
    :param JobId: **[REQUIRED]**
      The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
    :rtype: dict
    :returns:
    """
    pass
def describe_sentiment_detection_job(self, JobId: str) -> Dict:
    """
    Gets the properties associated with a sentiment detection job. Use this operation to get the status of a detection job.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeSentimentDetectionJob>`_
    **Request Syntax**
    ::
      response = client.describe_sentiment_detection_job(
          JobId='string'
      )
    **Response Syntax**
    ::
      {
          'SentimentDetectionJobProperties': {
              'JobId': 'string',
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'InputDataConfig': {
                  'S3Uri': 'string',
                  'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
              },
              'OutputDataConfig': {
                  'S3Uri': 'string',
                  'KmsKeyId': 'string'
              },
              'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **SentimentDetectionJobProperties** *(dict) --*
        An object that contains the properties associated with a sentiment detection job.
        - **JobId** *(string) --*
          The identifier assigned to the sentiment detection job.
        - **JobName** *(string) --*
          The name that you assigned to the sentiment detection job.
        - **JobStatus** *(string) --*
          The current status of the sentiment detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
        - **Message** *(string) --*
          A description of the status of a job.
        - **SubmitTime** *(datetime) --*
          The time that the sentiment detection job was submitted for processing.
        - **EndTime** *(datetime) --*
          The time that the sentiment detection job ended.
        - **InputDataConfig** *(dict) --*
          The input data configuration that you supplied when you created the sentiment detection job.
          - **S3Uri** *(string) --*
            The Amazon S3 URI for the input data. The URI must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
            For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
          - **InputFormat** *(string) --*
            Specifies how the text in an input file should be processed:
            * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
            * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
        - **OutputDataConfig** *(dict) --*
          The output data configuration that you supplied when you created the sentiment detection job.
          - **S3Uri** *(string) --*
            When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
            When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
          - **KmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
            * KMS Key Alias: ``"alias/ExampleAlias"``
            * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
        - **LanguageCode** *(string) --*
          The language code of the input documents.
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type JobId: string
    :param JobId: **[REQUIRED]**
      The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its response.
    :rtype: dict
    :returns:
    """
    pass
def describe_topics_detection_job(self, JobId: str) -> Dict:
    """
    Gets the properties associated with a topic detection job. Use this operation to get the status of a detection job.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeTopicsDetectionJob>`_
    **Request Syntax**
    ::
      response = client.describe_topics_detection_job(
          JobId='string'
      )
    **Response Syntax**
    ::
      {
          'TopicsDetectionJobProperties': {
              'JobId': 'string',
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'Message': 'string',
              'SubmitTime': datetime(2015, 1, 1),
              'EndTime': datetime(2015, 1, 1),
              'InputDataConfig': {
                  'S3Uri': 'string',
                  'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
              },
              'OutputDataConfig': {
                  'S3Uri': 'string',
                  'KmsKeyId': 'string'
              },
              'NumberOfTopics': 123,
              'DataAccessRoleArn': 'string',
              'VolumeKmsKeyId': 'string'
          }
      }
    **Response Structure**
    - *(dict) --*
      - **TopicsDetectionJobProperties** *(dict) --*
        The list of properties for the requested job.
        - **JobId** *(string) --*
          The identifier assigned to the topic detection job.
        - **JobName** *(string) --*
          The name of the topic detection job.
        - **JobStatus** *(string) --*
          The current status of the topic detection job. If the status is ``Failed`` , the reason for the failure is shown in the ``Message`` field.
        - **Message** *(string) --*
          A description for the status of a job.
        - **SubmitTime** *(datetime) --*
          The time that the topic detection job was submitted for processing.
        - **EndTime** *(datetime) --*
          The time that the topic detection job was completed.
        - **InputDataConfig** *(dict) --*
          The input data configuration supplied when you created the topic detection job.
          - **S3Uri** *(string) --*
            The Amazon S3 URI for the input data. The URI must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
            For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
          - **InputFormat** *(string) --*
            Specifies how the text in an input file should be processed:
            * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
            * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
        - **OutputDataConfig** *(dict) --*
          The output data configuration supplied when you created the topic detection job.
          - **S3Uri** *(string) --*
            When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
            When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
          - **KmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
            * KMS Key Alias: ``"alias/ExampleAlias"``
            * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
        - **NumberOfTopics** *(integer) --*
          The number of topics to detect supplied when you created the topic detection job. The default is 10.
        - **DataAccessRoleArn** *(string) --*
          The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your job data.
        - **VolumeKmsKeyId** *(string) --*
          ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
          * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
          * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    :type JobId: string
    :param JobId: **[REQUIRED]**
      The identifier assigned by the user to the detection job.
    :rtype: dict
    :returns:
    """
    pass
def detect_dominant_language(self, Text: str) -> Dict:
    """
    Determines the dominant language of the input text. For a list of languages that Amazon Comprehend can detect, see `Amazon Comprehend Supported Languages <https://docs.aws.amazon.com/comprehend/latest/dg/how-languages.html>`__ .
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DetectDominantLanguage>`_
    **Request Syntax**
    ::
      response = client.detect_dominant_language(
          Text='string'
      )
    **Response Syntax**
    ::
      {
          'Languages': [
              {
                  'LanguageCode': 'string',
                  'Score': ...
              },
          ]
      }
    **Response Structure**
    - *(dict) --*
      - **Languages** *(list) --*
        The languages that Amazon Comprehend detected in the input text. For each language, the response returns the RFC 5646 language code and the level of confidence that Amazon Comprehend has in the accuracy of its inference. For more information about RFC 5646, see `Tags for Identifying Languages <https://tools.ietf.org/html/rfc5646>`__ on the *IETF Tools* web site.
        - *(dict) --*
          Returns the code for the dominant language in the input text and the level of confidence that Amazon Comprehend has in the accuracy of the detection.
          - **LanguageCode** *(string) --*
            The RFC 5646 language code for the dominant language. For more information about RFC 5646, see `Tags for Identifying Languages <https://tools.ietf.org/html/rfc5646>`__ on the *IETF Tools* web site.
          - **Score** *(float) --*
            The level of confidence that Amazon Comprehend has in the accuracy of the detection.
    :type Text: string
    :param Text: **[REQUIRED]**
      A UTF-8 text string. Each string should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.
    :rtype: dict
    :returns:
    """
    pass
def detect_entities(self, Text: str, LanguageCode: str) -> Dict:
    """
    Inspects text for named entities, and returns information about them. For more information about named entities, see how-entities .
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DetectEntities>`_
    **Request Syntax**
    ::
      response = client.detect_entities(
          Text='string',
          LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
      )
    **Response Syntax**
    ::
      {
          'Entities': [
              {
                  'Score': ...,
                  'Type': 'PERSON'|'LOCATION'|'ORGANIZATION'|'COMMERCIAL_ITEM'|'EVENT'|'DATE'|'QUANTITY'|'TITLE'|'OTHER',
                  'Text': 'string',
                  'BeginOffset': 123,
                  'EndOffset': 123
              },
          ]
      }
    **Response Structure**
    - *(dict) --*
      - **Entities** *(list) --*
        A collection of entities identified in the input text. For each entity, the response provides the entity text, entity type, where the entity text begins and ends, and the level of confidence that Amazon Comprehend has in the detection. For a list of entity types, see how-entities .
        - *(dict) --*
          Provides information about an entity.
          - **Score** *(float) --*
            The level of confidence that Amazon Comprehend has in the accuracy of the detection.
          - **Type** *(string) --*
            The entity's type.
          - **Text** *(string) --*
            The text of the entity.
          - **BeginOffset** *(integer) --*
            A character offset in the input text that shows where the entity begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
          - **EndOffset** *(integer) --*
            A character offset in the input text that shows where the entity ends. The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
    :type Text: string
    :param Text: **[REQUIRED]**
      A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]**
      The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
    :rtype: dict
    :returns:
    """
    pass
def detect_key_phrases(self, Text: str, LanguageCode: str) -> Dict:
    """
    Detects the key noun phrases found in the text.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DetectKeyPhrases>`_
    **Request Syntax**
    ::
      response = client.detect_key_phrases(
          Text='string',
          LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
      )
    **Response Syntax**
    ::
      {
          'KeyPhrases': [
              {
                  'Score': ...,
                  'Text': 'string',
                  'BeginOffset': 123,
                  'EndOffset': 123
              },
          ]
      }
    **Response Structure**
    - *(dict) --*
      - **KeyPhrases** *(list) --*
        A collection of key phrases that Amazon Comprehend identified in the input text. For each key phrase, the response provides the text of the key phrase, where the key phrase begins and ends, and the level of confidence that Amazon Comprehend has in the accuracy of the detection.
        - *(dict) --*
          Describes a key noun phrase.
          - **Score** *(float) --*
            The level of confidence that Amazon Comprehend has in the accuracy of the detection.
          - **Text** *(string) --*
            The text of a key noun phrase.
          - **BeginOffset** *(integer) --*
            A character offset in the input text that shows where the key phrase begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
          - **EndOffset** *(integer) --*
            A character offset in the input text where the key phrase ends. The offset returns the position of each UTF-8 code point in the string. A ``code point`` is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
    :type Text: string
    :param Text: **[REQUIRED]**
      A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]**
      The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
    :rtype: dict
    :returns:
    """
    pass
def detect_sentiment(self, Text: str, LanguageCode: str) -> Dict:
    """
    Inspects text and returns an inference of the prevailing sentiment (``POSITIVE`` , ``NEUTRAL`` , ``MIXED`` , or ``NEGATIVE`` ).
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DetectSentiment>`_
    **Request Syntax**
    ::
      response = client.detect_sentiment(
          Text='string',
          LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
      )
    **Response Syntax**
    ::
      {
          'Sentiment': 'POSITIVE'|'NEGATIVE'|'NEUTRAL'|'MIXED',
          'SentimentScore': {
              'Positive': ...,
              'Negative': ...,
              'Neutral': ...,
              'Mixed': ...
          }
      }
    **Response Structure**
    - *(dict) --*
      - **Sentiment** *(string) --*
        The inferred sentiment that Amazon Comprehend has the highest level of confidence in.
      - **SentimentScore** *(dict) --*
        An object that lists the sentiments, and their corresponding confidence levels.
        - **Positive** *(float) --*
          The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``POSITIVE`` sentiment.
        - **Negative** *(float) --*
          The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``NEGATIVE`` sentiment.
        - **Neutral** *(float) --*
          The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``NEUTRAL`` sentiment.
        - **Mixed** *(float) --*
          The level of confidence that Amazon Comprehend has in the accuracy of its detection of the ``MIXED`` sentiment.
    :type Text: string
    :param Text: **[REQUIRED]**
      A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]**
      The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
    :rtype: dict
    :returns:
    """
    pass
def detect_syntax(self, Text: str, LanguageCode: str) -> Dict:
    """
    Inspects text for syntax and the part of speech of words in the document. For more information, see how-syntax .
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DetectSyntax>`_
    **Request Syntax**
    ::
      response = client.detect_syntax(
          Text='string',
          LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
      )
    **Response Syntax**
    ::
      {
          'SyntaxTokens': [
              {
                  'TokenId': 123,
                  'Text': 'string',
                  'BeginOffset': 123,
                  'EndOffset': 123,
                  'PartOfSpeech': {
                      'Tag': 'ADJ'|'ADP'|'ADV'|'AUX'|'CONJ'|'CCONJ'|'DET'|'INTJ'|'NOUN'|'NUM'|'O'|'PART'|'PRON'|'PROPN'|'PUNCT'|'SCONJ'|'SYM'|'VERB',
                      'Score': ...
                  }
              },
          ]
      }
    **Response Structure**
    - *(dict) --*
      - **SyntaxTokens** *(list) --*
        A collection of syntax tokens describing the text. For each token, the response provides the text, the token type, where the text begins and ends, and the level of confidence that Amazon Comprehend has that the token is correct. For a list of token types, see how-syntax .
        - *(dict) --*
          Represents a word in the input text that was recognized and assigned a part of speech. There is one syntax token record for each word in the source text.
          - **TokenId** *(integer) --*
            A unique identifier for a token.
          - **Text** *(string) --*
            The word that was recognized in the source text.
          - **BeginOffset** *(integer) --*
            The zero-based offset from the beginning of the source text to the first character in the word.
          - **EndOffset** *(integer) --*
            The zero-based offset from the beginning of the source text to the last character in the word.
          - **PartOfSpeech** *(dict) --*
            Provides the part of speech label and the confidence level that Amazon Comprehend has that the part of speech was correctly identified. For more information, see how-syntax .
            - **Tag** *(string) --*
              Identifies the part of speech that the token represents.
            - **Score** *(float) --*
              The confidence that Amazon Comprehend has that the part of speech was correctly identified.
    :type Text: string
    :param Text: **[REQUIRED]**
      A UTF-8 string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]**
      The language code of the input documents. You can specify English (\"en\") or Spanish (\"es\").
    :rtype: dict
    :returns:
    """
    pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Generate a presigned URL given a client, its method, and arguments.
    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to
        ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned URL is valid
        for. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated URL. By
        default, the http method is whatever is used in the method\'s model.
    :returns: The presigned URL
    """
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
    """
    Create a paginator for an operation.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is ``create_foo``, and you\'d normally invoke the
        operation as ``client.create_foo(**kwargs)``, if the
        ``create_foo`` operation can be paginated, you can use the
        call ``client.get_paginator(\"create_foo\")``.
    :raise OperationNotPageableError: Raised if the operation is not
        pageable. You can use the ``client.can_paginate`` method to
        check if an operation is pageable.
    :rtype: botocore.paginate.Paginator
    :return: A paginator object.
    """
    pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
    """
    Returns an object that can wait for some condition.
    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.
    :rtype: botocore.waiter.Waiter
    :returns: The specified waiter object.
    """
    pass
def list_document_classification_jobs(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Gets a list of the document classification jobs that you have submitted.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListDocumentClassificationJobs>`_
    **Request Syntax**
    ::
      response = client.list_document_classification_jobs(
          Filter={
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'SubmitTimeBefore': datetime(2015, 1, 1),
              'SubmitTimeAfter': datetime(2015, 1, 1)
          },
          NextToken='string',
          MaxResults=123
      )
    **Response Syntax**
    ::
      {
          'DocumentClassificationJobPropertiesList': [
              {
                  'JobId': 'string',
                  'JobName': 'string',
                  'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
                  'Message': 'string',
                  'SubmitTime': datetime(2015, 1, 1),
                  'EndTime': datetime(2015, 1, 1),
                  'DocumentClassifierArn': 'string',
                  'InputDataConfig': {
                      'S3Uri': 'string',
                      'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
                  },
                  'OutputDataConfig': {
                      'S3Uri': 'string',
                      'KmsKeyId': 'string'
                  },
                  'DataAccessRoleArn': 'string',
                  'VolumeKmsKeyId': 'string'
              },
          ],
          'NextToken': 'string'
      }
    **Response Structure**
    - *(dict) --*
      - **DocumentClassificationJobPropertiesList** *(list) --*
        A list containing the properties of each job returned.
        - *(dict) --*
          Provides information about a document classification job.
          - **JobId** *(string) --*
            The identifier assigned to the document classification job.
          - **JobName** *(string) --*
            The name that you assigned to the document classification job.
          - **JobStatus** *(string) --*
            The current status of the document classification job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
          - **Message** *(string) --*
            A description of the status of the job.
          - **SubmitTime** *(datetime) --*
            The time that the document classification job was submitted for processing.
          - **EndTime** *(datetime) --*
            The time that the document classification job completed.
          - **DocumentClassifierArn** *(string) --*
            The Amazon Resource Name (ARN) that identifies the document classifier.
          - **InputDataConfig** *(dict) --*
            The input data configuration that you supplied when you created the document classification job.
            - **S3Uri** *(string) --*
              The Amazon S3 URI for the input data. The URI must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
              For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
            - **InputFormat** *(string) --*
              Specifies how the text in an input file should be processed:
              * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
              * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
          - **OutputDataConfig** *(dict) --*
            The output data configuration that you supplied when you created the document classification job.
            - **S3Uri** *(string) --*
              When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
              When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
            - **KmsKeyId** *(string) --*
              ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
              * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
              * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
              * KMS Key Alias: ``"alias/ExampleAlias"``
              * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
          - **DataAccessRoleArn** *(string) --*
            The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
          - **VolumeKmsKeyId** *(string) --*
            ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
            * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
            * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
      - **NextToken** *(string) --*
        Identifies the next page of results to return.
    :type Filter: dict
    :param Filter:
      Filters the jobs that are returned. You can filter jobs on their names, status, or the date and time that they were submitted. You can only set one filter at a time.
      - **JobName** *(string) --*
        Filters on the name of the job.
      - **JobStatus** *(string) --*
        Filters the list based on job status. Returns only jobs with the specified status.
      - **SubmitTimeBefore** *(datetime) --*
        Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
      - **SubmitTimeAfter** *(datetime) --*
        Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
    :type NextToken: string
    :param NextToken:
      Identifies the next page of results to return.
    :type MaxResults: integer
    :param MaxResults:
      The maximum number of results to return in each page. The default is 100.
    :rtype: dict
    :returns:
    """
    pass
def list_document_classifiers(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Gets a list of the document classifiers that you have created.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListDocumentClassifiers>`_
**Request Syntax**
::
response = client.list_document_classifiers(
Filter={
'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',
'SubmitTimeBefore': datetime(2015, 1, 1),
'SubmitTimeAfter': datetime(2015, 1, 1)
},
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'DocumentClassifierPropertiesList': [
{
'DocumentClassifierArn': 'string',
'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',
'Message': 'string',
'SubmitTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1),
'TrainingStartTime': datetime(2015, 1, 1),
'TrainingEndTime': datetime(2015, 1, 1),
'InputDataConfig': {
'S3Uri': 'string'
},
'OutputDataConfig': {
'S3Uri': 'string',
'KmsKeyId': 'string'
},
'ClassifierMetadata': {
'NumberOfLabels': 123,
'NumberOfTrainedDocuments': 123,
'NumberOfTestDocuments': 123,
'EvaluationMetrics': {
'Accuracy': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1Score': 123.0
}
},
'DataAccessRoleArn': 'string',
'VolumeKmsKeyId': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **DocumentClassifierPropertiesList** *(list) --*
A list containing the properties of each job returned.
- *(dict) --*
Provides information about a document classifier.
- **DocumentClassifierArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the document classifier.
- **LanguageCode** *(string) --*
The language code for the language of the documents that the classifier was trained on.
- **Status** *(string) --*
The status of the document classifier. If the status is ``TRAINED`` the classifier is ready to use. If the status is ``FAILED`` you can see additional information about why the classifier wasn't trained in the ``Message`` field.
- **Message** *(string) --*
Additional information about the status of the classifier.
- **SubmitTime** *(datetime) --*
The time that the document classifier was submitted for training.
- **EndTime** *(datetime) --*
The time that training the document classifier completed.
- **TrainingStartTime** *(datetime) --*
Indicates the time when the training starts on documentation classifiers. You are billed for the time interval between this time and the value of TrainingEndTime.
- **TrainingEndTime** *(datetime) --*
The time that training of the document classifier was completed. Indicates the time when the training completes on documentation classifiers. You are billed for the time interval between this time and the value of TrainingStartTime.
- **InputDataConfig** *(dict) --*
The input data configuration that you supplied when you created the document classifier for training.
- **S3Uri** *(string) --*
The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of input files.
For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
- **OutputDataConfig** *(dict) --*
Provides output results configuration parameters for custom classifier jobs.
- **S3Uri** *(string) --*
When you use the ``OutputDataConfig`` object while creating a custom classifier, you specify the Amazon S3 location where you want to write the confusion matrix. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of this output file.
When the custom classifier job is finished, the service creates the output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the confusion matrix.
- **KmsKeyId** *(string) --*
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
* KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
* Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
* KMS Key Alias: ``"alias/ExampleAlias"``
* ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
- **ClassifierMetadata** *(dict) --*
Information about the document classifier, including the number of documents used for training the classifier, the number of documents used for test the classifier, and an accuracy rating.
- **NumberOfLabels** *(integer) --*
The number of labels in the input data.
- **NumberOfTrainedDocuments** *(integer) --*
The number of documents in the input data that were used to train the classifier. Typically this is 80 to 90 percent of the input documents.
- **NumberOfTestDocuments** *(integer) --*
The number of documents in the input data that were used to test the classifier. Typically this is 10 to 20 percent of the input documents.
- **EvaluationMetrics** *(dict) --*
Describes the result metrics for the test data associated with an documentation classifier.
- **Accuracy** *(float) --*
The fraction of the labels that were correct recognized. It is computed by dividing the number of labels in the test documents that were correctly recognized by the total number of labels in the test documents.
- **Precision** *(float) --*
A measure of the usefulness of the classifier results in the test data. High precision means that the classifier returned substantially more relevant results than irrelevant ones.
- **Recall** *(float) --*
A measure of how complete the classifier results are for the test data. High recall means that the classifier returned most of the relevant results.
- **F1Score** *(float) --*
A measure of how accurate the classifier results are for the test data. It is derived from the ``Precision`` and ``Recall`` values. The ``F1Score`` is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
- **DataAccessRoleArn** *(string) --*
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants Amazon Comprehend read access to your input data.
- **VolumeKmsKeyId** *(string) --*
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
* KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
* Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
- **NextToken** *(string) --*
Identifies the next page of results to return.
:type Filter: dict
:param Filter:
Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
- **Status** *(string) --*
Filters the list of classifiers based on status.
- **SubmitTimeBefore** *(datetime) --*
Filters the list of classifiers based on the time that the classifier was submitted for processing. Returns only classifiers submitted before the specified time. Classifiers are returned in ascending order, oldest to newest.
- **SubmitTimeAfter** *(datetime) --*
Filters the list of classifiers based on the time that the classifier was submitted for processing. Returns only classifiers submitted after the specified time. Classifiers are returned in descending order, newest to oldest.
:type NextToken: string
:param NextToken:
Identifies the next page of results to return.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to return in each page. The default is 100.
:rtype: dict
:returns:
"""
pass
def list_dominant_language_detection_jobs(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Gets a list of the dominant language detection jobs that you have submitted.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListDominantLanguageDetectionJobs>`_
    **Request Syntax**
    ::
      response = client.list_dominant_language_detection_jobs(
          Filter={
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'SubmitTimeBefore': datetime(2015, 1, 1),
              'SubmitTimeAfter': datetime(2015, 1, 1)
          },
          NextToken='string',
          MaxResults=123
      )
    **Response Syntax**
    ::
      {
          'DominantLanguageDetectionJobPropertiesList': [
              {
                  'JobId': 'string',
                  'JobName': 'string',
                  'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
                  'Message': 'string',
                  'SubmitTime': datetime(2015, 1, 1),
                  'EndTime': datetime(2015, 1, 1),
                  'InputDataConfig': {
                      'S3Uri': 'string',
                      'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
                  },
                  'OutputDataConfig': {
                      'S3Uri': 'string',
                      'KmsKeyId': 'string'
                  },
                  'DataAccessRoleArn': 'string',
                  'VolumeKmsKeyId': 'string'
              },
          ],
          'NextToken': 'string'
      }
    **Response Structure**
    - *(dict) --*
    - **DominantLanguageDetectionJobPropertiesList** *(list) --*
    A list containing the properties of each job that is returned.
    - *(dict) --*
    Provides information about a dominant language detection job.
    - **JobId** *(string) --*
    The identifier assigned to the dominant language detection job.
    - **JobName** *(string) --*
    The name that you assigned to the dominant language detection job.
    - **JobStatus** *(string) --*
    The current status of the dominant language detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
    - **Message** *(string) --*
    A description for the status of a job.
    - **SubmitTime** *(datetime) --*
    The time that the dominant language detection job was submitted for processing.
    - **EndTime** *(datetime) --*
    The time that the dominant language detection job completed.
    - **InputDataConfig** *(dict) --*
    The input data configuration that you supplied when you created the dominant language detection job.
    - **S3Uri** *(string) --*
    The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
    For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
    - **InputFormat** *(string) --*
    Specifies how the text in an input file should be processed:
    * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
    * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
    - **OutputDataConfig** *(dict) --*
    The output data configuration that you supplied when you created the dominant language detection job.
    - **S3Uri** *(string) --*
    When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
    When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
    - **KmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    * KMS Key Alias: ``"alias/ExampleAlias"``
    * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
    - **DataAccessRoleArn** *(string) --*
    The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
    - **VolumeKmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    - **NextToken** *(string) --*
    Identifies the next page of results to return.
    :type Filter: dict
    :param Filter:
    Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
    - **JobName** *(string) --*
    Filters on the name of the job.
    - **JobStatus** *(string) --*
    Filters the list of jobs based on job status. Returns only jobs with the specified status.
    - **SubmitTimeBefore** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
    - **SubmitTimeAfter** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
    :type NextToken: string
    :param NextToken:
    Identifies the next page of results to return.
    :type MaxResults: integer
    :param MaxResults:
    The maximum number of results to return in each page. The default is 100.
    :rtype: dict
    :returns:
    """
    # Generated documentation stub: the real implementation is dispatched at
    # runtime by botocore from the service model.
    pass
def list_entities_detection_jobs(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Gets a list of the entity detection jobs that you have submitted.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListEntitiesDetectionJobs>`_
    **Request Syntax**
    ::
      response = client.list_entities_detection_jobs(
          Filter={
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'SubmitTimeBefore': datetime(2015, 1, 1),
              'SubmitTimeAfter': datetime(2015, 1, 1)
          },
          NextToken='string',
          MaxResults=123
      )
    **Response Syntax**
    ::
      {
          'EntitiesDetectionJobPropertiesList': [
              {
                  'JobId': 'string',
                  'JobName': 'string',
                  'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
                  'Message': 'string',
                  'SubmitTime': datetime(2015, 1, 1),
                  'EndTime': datetime(2015, 1, 1),
                  'EntityRecognizerArn': 'string',
                  'InputDataConfig': {
                      'S3Uri': 'string',
                      'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
                  },
                  'OutputDataConfig': {
                      'S3Uri': 'string',
                      'KmsKeyId': 'string'
                  },
                  'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
                  'DataAccessRoleArn': 'string',
                  'VolumeKmsKeyId': 'string'
              },
          ],
          'NextToken': 'string'
      }
    **Response Structure**
    - *(dict) --*
    - **EntitiesDetectionJobPropertiesList** *(list) --*
    A list containing the properties of each job that is returned.
    - *(dict) --*
    Provides information about an entities detection job.
    - **JobId** *(string) --*
    The identifier assigned to the entities detection job.
    - **JobName** *(string) --*
    The name that you assigned the entities detection job.
    - **JobStatus** *(string) --*
    The current status of the entities detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
    - **Message** *(string) --*
    A description of the status of a job.
    - **SubmitTime** *(datetime) --*
    The time that the entities detection job was submitted for processing.
    - **EndTime** *(datetime) --*
    The time that the entities detection job completed
    - **EntityRecognizerArn** *(string) --*
    The Amazon Resource Name (ARN) that identifies the entity recognizer.
    - **InputDataConfig** *(dict) --*
    The input data configuration that you supplied when you created the entities detection job.
    - **S3Uri** *(string) --*
    The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
    For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
    - **InputFormat** *(string) --*
    Specifies how the text in an input file should be processed:
    * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
    * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
    - **OutputDataConfig** *(dict) --*
    The output data configuration that you supplied when you created the entities detection job.
    - **S3Uri** *(string) --*
    When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
    When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
    - **KmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    * KMS Key Alias: ``"alias/ExampleAlias"``
    * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
    - **LanguageCode** *(string) --*
    The language code of the input documents.
    - **DataAccessRoleArn** *(string) --*
    The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
    - **VolumeKmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    - **NextToken** *(string) --*
    Identifies the next page of results to return.
    :type Filter: dict
    :param Filter:
    Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
    - **JobName** *(string) --*
    Filters on the name of the job.
    - **JobStatus** *(string) --*
    Filters the list of jobs based on job status. Returns only jobs with the specified status.
    - **SubmitTimeBefore** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
    - **SubmitTimeAfter** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
    :type NextToken: string
    :param NextToken:
    Identifies the next page of results to return.
    :type MaxResults: integer
    :param MaxResults:
    The maximum number of results to return in each page. The default is 100.
    :rtype: dict
    :returns:
    """
    # Generated documentation stub: the real implementation is dispatched at
    # runtime by botocore from the service model.
    pass
def list_entity_recognizers(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Gets a list of the properties of all entity recognizers that you created, including recognizers currently in training. Allows you to filter the list of recognizers based on criteria such as status and submission time. This call returns up to 500 entity recognizers in the list, with a default number of 100 recognizers in the list.
    The results of this list are not in any particular order. Please get the list and sort locally if needed.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListEntityRecognizers>`_
    **Request Syntax**
    ::
      response = client.list_entity_recognizers(
          Filter={
              'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',
              'SubmitTimeBefore': datetime(2015, 1, 1),
              'SubmitTimeAfter': datetime(2015, 1, 1)
          },
          NextToken='string',
          MaxResults=123
      )
    **Response Syntax**
    ::
      {
          'EntityRecognizerPropertiesList': [
              {
                  'EntityRecognizerArn': 'string',
                  'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
                  'Status': 'SUBMITTED'|'TRAINING'|'DELETING'|'STOP_REQUESTED'|'STOPPED'|'IN_ERROR'|'TRAINED',
                  'Message': 'string',
                  'SubmitTime': datetime(2015, 1, 1),
                  'EndTime': datetime(2015, 1, 1),
                  'TrainingStartTime': datetime(2015, 1, 1),
                  'TrainingEndTime': datetime(2015, 1, 1),
                  'InputDataConfig': {
                      'EntityTypes': [
                          {
                              'Type': 'string'
                          },
                      ],
                      'Documents': {
                          'S3Uri': 'string'
                      },
                      'Annotations': {
                          'S3Uri': 'string'
                      },
                      'EntityList': {
                          'S3Uri': 'string'
                      }
                  },
                  'RecognizerMetadata': {
                      'NumberOfTrainedDocuments': 123,
                      'NumberOfTestDocuments': 123,
                      'EvaluationMetrics': {
                          'Precision': 123.0,
                          'Recall': 123.0,
                          'F1Score': 123.0
                      },
                      'EntityTypes': [
                          {
                              'Type': 'string'
                          },
                      ]
                  },
                  'DataAccessRoleArn': 'string',
                  'VolumeKmsKeyId': 'string'
              },
          ],
          'NextToken': 'string'
      }
    **Response Structure**
    - *(dict) --*
    - **EntityRecognizerPropertiesList** *(list) --*
    The list of properties of an entity recognizer.
    - *(dict) --*
    Describes information about an entity recognizer.
    - **EntityRecognizerArn** *(string) --*
    The Amazon Resource Name (ARN) that identifies the entity recognizer.
    - **LanguageCode** *(string) --*
    The language of the input documents. All documents must be in the same language. Only English ("en") is currently supported.
    - **Status** *(string) --*
    Provides the status of the entity recognizer.
    - **Message** *(string) --*
    A description of the status of the recognizer.
    - **SubmitTime** *(datetime) --*
    The time that the recognizer was submitted for processing.
    - **EndTime** *(datetime) --*
    The time that the recognizer creation completed.
    - **TrainingStartTime** *(datetime) --*
    The time that training of the entity recognizer started.
    - **TrainingEndTime** *(datetime) --*
    The time that training of the entity recognizer was completed.
    - **InputDataConfig** *(dict) --*
    The input data properties of an entity recognizer.
    - **EntityTypes** *(list) --*
    The entity types in the input data for an entity recognizer.
    - *(dict) --*
    Information about an individual item on a list of entity types.
    - **Type** *(string) --*
    Entity type of an item on an entity type list.
    - **Documents** *(dict) --*
    S3 location of the documents folder for an entity recognizer
    - **S3Uri** *(string) --*
    Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
    - **Annotations** *(dict) --*
    S3 location of the annotations file for an entity recognizer.
    - **S3Uri** *(string) --*
    Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
    - **EntityList** *(dict) --*
    S3 location of the entity list for an entity recognizer.
    - **S3Uri** *(string) --*
    Specifies the Amazon S3 location where the entity list is located. The URI must be in the same region as the API endpoint that you are calling.
    - **RecognizerMetadata** *(dict) --*
    Provides information about an entity recognizer.
    - **NumberOfTrainedDocuments** *(integer) --*
    The number of documents in the input data that were used to train the entity recognizer. Typically this is 80 to 90 percent of the input documents.
    - **NumberOfTestDocuments** *(integer) --*
    The number of documents in the input data that were used to test the entity recognizer. Typically this is 10 to 20 percent of the input documents.
    - **EvaluationMetrics** *(dict) --*
    Detailed information about the accuracy of an entity recognizer.
    - **Precision** *(float) --*
    A measure of the usefulness of the recognizer results in the test data. High precision means that the recognizer returned substantially more relevant results than irrelevant ones.
    - **Recall** *(float) --*
    A measure of how complete the recognizer results are for the test data. High recall means that the recognizer returned most of the relevant results.
    - **F1Score** *(float) --*
    A measure of how accurate the recognizer results are for the test data. It is derived from the ``Precision`` and ``Recall`` values. The ``F1Score`` is the harmonic average of the two scores. The highest score is 1, and the worst score is 0.
    - **EntityTypes** *(list) --*
    Entity types from the metadata of an entity recognizer.
    - *(dict) --*
    Individual item from the list of entity types in the metadata of an entity recognizer.
    - **Type** *(string) --*
    Type of entity from the list of entity types in the metadata of an entity recognizer.
    - **DataAccessRoleArn** *(string) --*
    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend read access to your input data.
    - **VolumeKmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    - **NextToken** *(string) --*
    Identifies the next page of results to return.
    :type Filter: dict
    :param Filter:
    Filters the list of entities returned. You can filter on ``Status`` , ``SubmitTimeBefore`` , or ``SubmitTimeAfter`` . You can only set one filter at a time.
    - **Status** *(string) --*
    The status of an entity recognizer.
    - **SubmitTimeBefore** *(datetime) --*
    Filters the list of entities based on the time that the list was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in descending order, newest to oldest.
    - **SubmitTimeAfter** *(datetime) --*
    Filters the list of entities based on the time that the list was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in ascending order, oldest to newest.
    :type NextToken: string
    :param NextToken:
    Identifies the next page of results to return.
    :type MaxResults: integer
    :param MaxResults:
    The maximum number of results to return on each page. The default is 100.
    :rtype: dict
    :returns:
    """
    # Generated documentation stub: the real implementation is dispatched at
    # runtime by botocore from the service model.
    pass
def list_key_phrases_detection_jobs(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Get a list of key phrase detection jobs that you have submitted.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListKeyPhrasesDetectionJobs>`_
    **Request Syntax**
    ::
      response = client.list_key_phrases_detection_jobs(
          Filter={
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'SubmitTimeBefore': datetime(2015, 1, 1),
              'SubmitTimeAfter': datetime(2015, 1, 1)
          },
          NextToken='string',
          MaxResults=123
      )
    **Response Syntax**
    ::
      {
          'KeyPhrasesDetectionJobPropertiesList': [
              {
                  'JobId': 'string',
                  'JobName': 'string',
                  'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
                  'Message': 'string',
                  'SubmitTime': datetime(2015, 1, 1),
                  'EndTime': datetime(2015, 1, 1),
                  'InputDataConfig': {
                      'S3Uri': 'string',
                      'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
                  },
                  'OutputDataConfig': {
                      'S3Uri': 'string',
                      'KmsKeyId': 'string'
                  },
                  'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
                  'DataAccessRoleArn': 'string',
                  'VolumeKmsKeyId': 'string'
              },
          ],
          'NextToken': 'string'
      }
    **Response Structure**
    - *(dict) --*
    - **KeyPhrasesDetectionJobPropertiesList** *(list) --*
    A list containing the properties of each job that is returned.
    - *(dict) --*
    Provides information about a key phrases detection job.
    - **JobId** *(string) --*
    The identifier assigned to the key phrases detection job.
    - **JobName** *(string) --*
    The name that you assigned the key phrases detection job.
    - **JobStatus** *(string) --*
    The current status of the key phrases detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
    - **Message** *(string) --*
    A description of the status of a job.
    - **SubmitTime** *(datetime) --*
    The time that the key phrases detection job was submitted for processing.
    - **EndTime** *(datetime) --*
    The time that the key phrases detection job completed.
    - **InputDataConfig** *(dict) --*
    The input data configuration that you supplied when you created the key phrases detection job.
    - **S3Uri** *(string) --*
    The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
    For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
    - **InputFormat** *(string) --*
    Specifies how the text in an input file should be processed:
    * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
    * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
    - **OutputDataConfig** *(dict) --*
    The output data configuration that you supplied when you created the key phrases detection job.
    - **S3Uri** *(string) --*
    When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
    When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
    - **KmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    * KMS Key Alias: ``"alias/ExampleAlias"``
    * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
    - **LanguageCode** *(string) --*
    The language code of the input documents.
    - **DataAccessRoleArn** *(string) --*
    The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
    - **VolumeKmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    - **NextToken** *(string) --*
    Identifies the next page of results to return.
    :type Filter: dict
    :param Filter:
    Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
    - **JobName** *(string) --*
    Filters on the name of the job.
    - **JobStatus** *(string) --*
    Filters the list of jobs based on job status. Returns only jobs with the specified status.
    - **SubmitTimeBefore** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
    - **SubmitTimeAfter** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
    :type NextToken: string
    :param NextToken:
    Identifies the next page of results to return.
    :type MaxResults: integer
    :param MaxResults:
    The maximum number of results to return in each page. The default is 100.
    :rtype: dict
    :returns:
    """
    # Generated documentation stub: the real implementation is dispatched at
    # runtime by botocore from the service model.
    pass
def list_sentiment_detection_jobs(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Gets a list of sentiment detection jobs that you have submitted.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListSentimentDetectionJobs>`_
    **Request Syntax**
    ::
      response = client.list_sentiment_detection_jobs(
          Filter={
              'JobName': 'string',
              'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
              'SubmitTimeBefore': datetime(2015, 1, 1),
              'SubmitTimeAfter': datetime(2015, 1, 1)
          },
          NextToken='string',
          MaxResults=123
      )
    **Response Syntax**
    ::
      {
          'SentimentDetectionJobPropertiesList': [
              {
                  'JobId': 'string',
                  'JobName': 'string',
                  'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
                  'Message': 'string',
                  'SubmitTime': datetime(2015, 1, 1),
                  'EndTime': datetime(2015, 1, 1),
                  'InputDataConfig': {
                      'S3Uri': 'string',
                      'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
                  },
                  'OutputDataConfig': {
                      'S3Uri': 'string',
                      'KmsKeyId': 'string'
                  },
                  'LanguageCode': 'en'|'es'|'fr'|'de'|'it'|'pt',
                  'DataAccessRoleArn': 'string',
                  'VolumeKmsKeyId': 'string'
              },
          ],
          'NextToken': 'string'
      }
    **Response Structure**
    - *(dict) --*
    - **SentimentDetectionJobPropertiesList** *(list) --*
    A list containing the properties of each job that is returned.
    - *(dict) --*
    Provides information about a sentiment detection job.
    - **JobId** *(string) --*
    The identifier assigned to the sentiment detection job.
    - **JobName** *(string) --*
    The name that you assigned to the sentiment detection job
    - **JobStatus** *(string) --*
    The current status of the sentiment detection job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
    - **Message** *(string) --*
    A description of the status of a job.
    - **SubmitTime** *(datetime) --*
    The time that the sentiment detection job was submitted for processing.
    - **EndTime** *(datetime) --*
    The time that the sentiment detection job ended.
    - **InputDataConfig** *(dict) --*
    The input data configuration that you supplied when you created the sentiment detection job.
    - **S3Uri** *(string) --*
    The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
    For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
    - **InputFormat** *(string) --*
    Specifies how the text in an input file should be processed:
    * ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
    * ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
    - **OutputDataConfig** *(dict) --*
    The output data configuration that you supplied when you created the sentiment detection job.
    - **S3Uri** *(string) --*
    When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
    When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the output of the operation.
    - **KmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    * KMS Key Alias: ``"alias/ExampleAlias"``
    * ARN of a KMS Key Alias: ``"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"``
    - **LanguageCode** *(string) --*
    The language code of the input documents.
    - **DataAccessRoleArn** *(string) --*
    The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input data.
    - **VolumeKmsKeyId** *(string) --*
    ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
    * KMS Key ID: ``"1234abcd-12ab-34cd-56ef-1234567890ab"``
    * Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"``
    - **NextToken** *(string) --*
    Identifies the next page of results to return.
    :type Filter: dict
    :param Filter:
    Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
    - **JobName** *(string) --*
    Filters on the name of the job.
    - **JobStatus** *(string) --*
    Filters the list of jobs based on job status. Returns only jobs with the specified status.
    - **SubmitTimeBefore** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.
    - **SubmitTimeAfter** *(datetime) --*
    Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.
    :type NextToken: string
    :param NextToken:
    Identifies the next page of results to return.
    :type MaxResults: integer
    :param MaxResults:
    The maximum number of results to return in each page. The default is 100.
    :rtype: dict
    :returns:
    """
    # Generated documentation stub: the real implementation is dispatched at
    # runtime by botocore from the service model.
    pass
def list_tags_for_resource(self, ResourceArn: str) -> Dict:
    """
    List all tags associated with a given Amazon Comprehend resource.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListTagsForResource>`_

    **Request Syntax**
    ::
        response = client.list_tags_for_resource(
            ResourceArn='string'
        )

    The response is a dict holding the queried resource's ``ResourceArn``
    and its ``Tags`` list; each tag is a dict with ``Key`` and ``Value``
    strings. A tag is a key-value pair that adds metadata to a resource --
    for instance, a tag with the key ``Department`` and the value
    ``Sales`` marks a resource as used by the sales department.

    :type ResourceArn: string
    :param ResourceArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the Amazon Comprehend resource
        being queried.
    :rtype: dict
    :returns: The resource ARN and the tags attached to it.
    """
    pass
def list_topics_detection_jobs(self, Filter: Dict = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Get a list of the topic detection jobs that you have submitted.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/ListTopicsDetectionJobs>`_

    **Request Syntax**
    ::
        response = client.list_topics_detection_jobs(
            Filter={
                'JobName': 'string',
                'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
                'SubmitTimeBefore': datetime(2015, 1, 1),
                'SubmitTimeAfter': datetime(2015, 1, 1)
            },
            NextToken='string',
            MaxResults=123
        )

    The response dict contains ``TopicsDetectionJobPropertiesList`` -- one
    entry per job carrying the job's identifier, name, status (plus a
    ``Message`` describing a failure), submit and end times, the input and
    output data configuration supplied at creation, the number of topics
    to detect, the data-access role ARN and the volume KMS key -- together
    with a ``NextToken`` for retrieving the next page.

    :type Filter: dict
    :param Filter:
        Filters the jobs that are returned. Jobs can be filtered on their
        name (``JobName``), status (``JobStatus``), or the time they were
        submitted for processing (``SubmitTimeBefore`` returns older jobs
        first, ``SubmitTimeAfter`` newer first). Only one filter can be
        set at a time.
    :type NextToken: string
    :param NextToken:
        Identifies the next page of results to return.
    :type MaxResults: integer
    :param MaxResults:
        The maximum number of results to return in each page. The default
        is 100.
    :rtype: dict
    :returns: The matching topic detection jobs and a pagination token.
    """
    pass
def start_document_classification_job(self, DocumentClassifierArn: str, InputDataConfig: Dict, OutputDataConfig: Dict, DataAccessRoleArn: str, JobName: str = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
    """
    Start an asynchronous document classification job. Use the operation
    to track the progress of the job.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartDocumentClassificationJob>`_

    **Request Syntax**
    ::
        response = client.start_document_classification_job(
            JobName='string',
            DocumentClassifierArn='string',
            InputDataConfig={
                'S3Uri': 'string',
                'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
            },
            OutputDataConfig={
                'S3Uri': 'string',
                'KmsKeyId': 'string'
            },
            DataAccessRoleArn='string',
            ClientRequestToken='string',
            VolumeKmsKeyId='string'
        )

    The response dict contains the generated ``JobId`` (use it with the
    describe operation to get the job's status) and the ``JobStatus``, one
    of SUBMITTED, IN_PROGRESS, COMPLETED, FAILED, STOP_REQUESTED or
    STOPPED.

    :type JobName: string
    :param JobName:
        The identifier of the job.
    :type DocumentClassifierArn: string
    :param DocumentClassifierArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the document classifier to use
        to process the job.
    :type InputDataConfig: dict
    :param InputDataConfig: **[REQUIRED]**
        Specifies the format and location of the input data for the job.
        ``S3Uri`` (required) must be in the same region as the API
        endpoint being called and may point to a single input file or act
        as the prefix for a collection of data files. ``InputFormat``
        selects ``ONE_DOC_PER_FILE`` (each file is one document; use for
        large documents) or ``ONE_DOC_PER_LINE`` (each line is one
        document; use for many short documents such as text messages).
    :type OutputDataConfig: dict
    :param OutputDataConfig: **[REQUIRED]**
        Specifies where to send the output files. ``S3Uri`` (required) is
        used as the prefix for the actual output location; when the job
        finishes the service writes ``output.tar.gz``, a compressed
        archive containing the output of the operation, into a
        job-specific directory there. ``KmsKeyId`` optionally names the
        KMS key used to encrypt the output results, given as a key ID,
        key ARN, alias, or alias ARN.
    :type DataAccessRoleArn: string
    :param DataAccessRoleArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the AWS Identity and Access
        Management (IAM) role that grants Amazon Comprehend read access
        to your input data.
    :type ClientRequestToken: string
    :param ClientRequestToken:
        A unique identifier for the request. If you do not set the client
        request token, Amazon Comprehend generates one. This field is
        autopopulated if not provided.
    :type VolumeKmsKeyId: string
    :param VolumeKmsKeyId:
        ID for the KMS key (key ID or key ARN) used to encrypt data on
        the storage volume attached to the ML compute instance(s) that
        process the analysis job.
    :rtype: dict
    :returns: The job identifier and its initial status.
    """
    pass
def start_dominant_language_detection_job(self, InputDataConfig: Dict, OutputDataConfig: Dict, DataAccessRoleArn: str, JobName: str = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
    """
    Start an asynchronous dominant language detection job for a collection
    of documents. Use the operation to track the status of a job.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartDominantLanguageDetectionJob>`_

    **Request Syntax**
    ::
        response = client.start_dominant_language_detection_job(
            InputDataConfig={
                'S3Uri': 'string',
                'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
            },
            OutputDataConfig={
                'S3Uri': 'string',
                'KmsKeyId': 'string'
            },
            DataAccessRoleArn='string',
            JobName='string',
            ClientRequestToken='string',
            VolumeKmsKeyId='string'
        )

    The response dict contains the generated ``JobId`` (use it with the
    describe operation to get the job's status) and the ``JobStatus``:
    SUBMITTED (received and queued), IN_PROGRESS, COMPLETED (output
    available), or FAILED (use the describe operation for details).

    :type InputDataConfig: dict
    :param InputDataConfig: **[REQUIRED]**
        Specifies the format and location of the input data for the job.
        ``S3Uri`` (required) must be in the same region as the API
        endpoint being called and may point to a single input file or act
        as the prefix for a collection of data files. ``InputFormat``
        selects ``ONE_DOC_PER_FILE`` (each file is one document) or
        ``ONE_DOC_PER_LINE`` (each line is one document).
    :type OutputDataConfig: dict
    :param OutputDataConfig: **[REQUIRED]**
        Specifies where to send the output files. ``S3Uri`` (required) is
        the prefix under which the service writes ``output.tar.gz``, a
        compressed archive containing the output of the operation.
        ``KmsKeyId`` optionally names the KMS key used to encrypt the
        output results (key ID, key ARN, alias, or alias ARN).
    :type DataAccessRoleArn: string
    :param DataAccessRoleArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the AWS Identity and Access
        Management (IAM) role that grants Amazon Comprehend read access
        to your input data. For more information, see
        `https\://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions <https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions>`__ .
    :type JobName: string
    :param JobName:
        An identifier for the job.
    :type ClientRequestToken: string
    :param ClientRequestToken:
        A unique identifier for the request. If you do not set the client
        request token, Amazon Comprehend generates one. This field is
        autopopulated if not provided.
    :type VolumeKmsKeyId: string
    :param VolumeKmsKeyId:
        ID for the KMS key (key ID or key ARN) used to encrypt data on
        the storage volume attached to the ML compute instance(s) that
        process the analysis job.
    :rtype: dict
    :returns: The job identifier and its initial status.
    """
    pass
def start_entities_detection_job(self, InputDataConfig: Dict, OutputDataConfig: Dict, DataAccessRoleArn: str, LanguageCode: str, JobName: str = None, EntityRecognizerArn: str = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
    """
    Start an asynchronous entity detection job for a collection of
    documents. Use the operation to track the status of a job.

    This API can be used for either standard entity detection or custom
    entity recognition. To use it for custom entity recognition, the
    optional ``EntityRecognizerArn`` must be supplied to provide access to
    the recognizer being used to detect the custom entity.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartEntitiesDetectionJob>`_

    **Request Syntax**
    ::
        response = client.start_entities_detection_job(
            InputDataConfig={
                'S3Uri': 'string',
                'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
            },
            OutputDataConfig={
                'S3Uri': 'string',
                'KmsKeyId': 'string'
            },
            DataAccessRoleArn='string',
            JobName='string',
            EntityRecognizerArn='string',
            LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',
            ClientRequestToken='string',
            VolumeKmsKeyId='string'
        )

    The response dict contains the generated ``JobId`` (use it with the
    describe operation to get the job's status) and the ``JobStatus``, one
    of SUBMITTED, IN_PROGRESS, COMPLETED, FAILED, STOP_REQUESTED or
    STOPPED.

    :type InputDataConfig: dict
    :param InputDataConfig: **[REQUIRED]**
        Specifies the format and location of the input data for the job.
        ``S3Uri`` (required) must be in the same region as the API
        endpoint being called and may point to a single input file or act
        as the prefix for a collection of data files. ``InputFormat``
        selects ``ONE_DOC_PER_FILE`` (each file is one document) or
        ``ONE_DOC_PER_LINE`` (each line is one document).
    :type OutputDataConfig: dict
    :param OutputDataConfig: **[REQUIRED]**
        Specifies where to send the output files. ``S3Uri`` (required) is
        the prefix under which the service writes ``output.tar.gz``, a
        compressed archive containing the output of the operation.
        ``KmsKeyId`` optionally names the KMS key used to encrypt the
        output results (key ID, key ARN, alias, or alias ARN).
    :type DataAccessRoleArn: string
    :param DataAccessRoleArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the AWS Identity and Access
        Management (IAM) role that grants Amazon Comprehend read access
        to your input data. For more information, see
        `https\://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions <https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions>`__ .
    :type JobName: string
    :param JobName:
        The identifier of the job.
    :type EntityRecognizerArn: string
    :param EntityRecognizerArn:
        The Amazon Resource Name (ARN) that identifies the specific entity
        recognizer to be used by the ``StartEntitiesDetectionJob``. This
        ARN is optional and is only used for a custom entity recognition
        job.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]**
        The language of the input documents. All documents must be in the
        same language. You can specify any of the languages supported by
        Amazon Comprehend: English (\"en\"), Spanish (\"es\"), French
        (\"fr\"), German (\"de\"), Italian (\"it\"), or Portuguese
        (\"pt\"). If custom entities recognition is used, this parameter
        is ignored and the language used for training the model is used
        instead.
    :type ClientRequestToken: string
    :param ClientRequestToken:
        A unique identifier for the request. If you don\'t set the client
        request token, Amazon Comprehend generates one. This field is
        autopopulated if not provided.
    :type VolumeKmsKeyId: string
    :param VolumeKmsKeyId:
        ID for the KMS key (key ID or key ARN) used to encrypt data on
        the storage volume attached to the ML compute instance(s) that
        process the analysis job.
    :rtype: dict
    :returns: The job identifier and its initial status.
    """
    pass
def start_key_phrases_detection_job(self, InputDataConfig: Dict, OutputDataConfig: Dict, DataAccessRoleArn: str, LanguageCode: str, JobName: str = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
    """
    Start an asynchronous key phrase detection job for a collection of
    documents. Use the operation to track the status of a job.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartKeyPhrasesDetectionJob>`_

    **Request Syntax**
    ::
        response = client.start_key_phrases_detection_job(
            InputDataConfig={
                'S3Uri': 'string',
                'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
            },
            OutputDataConfig={
                'S3Uri': 'string',
                'KmsKeyId': 'string'
            },
            DataAccessRoleArn='string',
            JobName='string',
            LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',
            ClientRequestToken='string',
            VolumeKmsKeyId='string'
        )

    The response dict contains the generated ``JobId`` (use it with the
    describe operation to get the job's status) and the ``JobStatus``:
    SUBMITTED (received and queued), IN_PROGRESS, COMPLETED (output
    available), or FAILED (use the describe operation for details).

    :type InputDataConfig: dict
    :param InputDataConfig: **[REQUIRED]**
        Specifies the format and location of the input data for the job.
        ``S3Uri`` (required) must be in the same region as the API
        endpoint being called and may point to a single input file or act
        as the prefix for a collection of data files. ``InputFormat``
        selects ``ONE_DOC_PER_FILE`` (each file is one document) or
        ``ONE_DOC_PER_LINE`` (each line is one document).
    :type OutputDataConfig: dict
    :param OutputDataConfig: **[REQUIRED]**
        Specifies where to send the output files. ``S3Uri`` (required) is
        the prefix under which the service writes ``output.tar.gz``, a
        compressed archive containing the output of the operation.
        ``KmsKeyId`` optionally names the KMS key used to encrypt the
        output results (key ID, key ARN, alias, or alias ARN).
    :type DataAccessRoleArn: string
    :param DataAccessRoleArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the AWS Identity and Access
        Management (IAM) role that grants Amazon Comprehend read access
        to your input data. For more information, see
        `https\://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions <https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions>`__ .
    :type JobName: string
    :param JobName:
        The identifier of the job.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]**
        The language of the input documents. You can specify English
        (\"en\") or Spanish (\"es\"). All documents must be in the same
        language.
    :type ClientRequestToken: string
    :param ClientRequestToken:
        A unique identifier for the request. If you don\'t set the client
        request token, Amazon Comprehend generates one. This field is
        autopopulated if not provided.
    :type VolumeKmsKeyId: string
    :param VolumeKmsKeyId:
        ID for the KMS key (key ID or key ARN) used to encrypt data on
        the storage volume attached to the ML compute instance(s) that
        process the analysis job.
    :rtype: dict
    :returns: The job identifier and its initial status.
    """
    pass
def start_sentiment_detection_job(self, InputDataConfig: Dict, OutputDataConfig: Dict, DataAccessRoleArn: str, LanguageCode: str, JobName: str = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
    """
    Start an asynchronous sentiment detection job for a collection of
    documents. Use the operation to track the status of a job.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartSentimentDetectionJob>`_

    **Request Syntax**
    ::
        response = client.start_sentiment_detection_job(
            InputDataConfig={
                'S3Uri': 'string',
                'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
            },
            OutputDataConfig={
                'S3Uri': 'string',
                'KmsKeyId': 'string'
            },
            DataAccessRoleArn='string',
            JobName='string',
            LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt',
            ClientRequestToken='string',
            VolumeKmsKeyId='string'
        )

    The response dict contains the generated ``JobId`` (use it with the
    describe operation to get the job's status) and the ``JobStatus``:
    SUBMITTED (received and queued), IN_PROGRESS, COMPLETED (output
    available), or FAILED (use the describe operation for details).

    :type InputDataConfig: dict
    :param InputDataConfig: **[REQUIRED]**
        Specifies the format and location of the input data for the job.
        ``S3Uri`` (required) must be in the same region as the API
        endpoint being called and may point to a single input file or act
        as the prefix for a collection of data files. ``InputFormat``
        selects ``ONE_DOC_PER_FILE`` (each file is one document) or
        ``ONE_DOC_PER_LINE`` (each line is one document).
    :type OutputDataConfig: dict
    :param OutputDataConfig: **[REQUIRED]**
        Specifies where to send the output files. ``S3Uri`` (required) is
        the prefix under which the service writes ``output.tar.gz``, a
        compressed archive containing the output of the operation.
        ``KmsKeyId`` optionally names the KMS key used to encrypt the
        output results (key ID, key ARN, alias, or alias ARN).
    :type DataAccessRoleArn: string
    :param DataAccessRoleArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the AWS Identity and Access
        Management (IAM) role that grants Amazon Comprehend read access
        to your input data. For more information, see
        `https\://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions <https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions>`__ .
    :type JobName: string
    :param JobName:
        The identifier of the job.
    :type LanguageCode: string
    :param LanguageCode: **[REQUIRED]**
        The language of the input documents. You can specify English
        (\"en\") or Spanish (\"es\"). All documents must be in the same
        language.
    :type ClientRequestToken: string
    :param ClientRequestToken:
        A unique identifier for the request. If you don\'t set the client
        request token, Amazon Comprehend generates one. This field is
        autopopulated if not provided.
    :type VolumeKmsKeyId: string
    :param VolumeKmsKeyId:
        ID for the KMS key (key ID or key ARN) used to encrypt data on
        the storage volume attached to the ML compute instance(s) that
        process the analysis job.
    :rtype: dict
    :returns: The job identifier and its initial status.
    """
    pass
def start_topics_detection_job(self, InputDataConfig: Dict, OutputDataConfig: Dict, DataAccessRoleArn: str, JobName: str = None, NumberOfTopics: int = None, ClientRequestToken: str = None, VolumeKmsKeyId: str = None) -> Dict:
    """
    Starts an asynchronous topic detection job. Use the
    ``DescribeTopicDetectionJob`` operation to track the status of a job.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StartTopicsDetectionJob>`_

    :type InputDataConfig: dict
    :param InputDataConfig: **[REQUIRED]**
        Format and location of the input data: ``S3Uri`` (required) and an
        optional ``InputFormat`` of ``'ONE_DOC_PER_FILE'`` (one document per
        file, for large documents) or ``'ONE_DOC_PER_LINE'`` (one document per
        line, for many short documents).
    :type OutputDataConfig: dict
    :param OutputDataConfig: **[REQUIRED]**
        Where to send the output files: ``S3Uri`` (required) and an optional
        ``KmsKeyId``. When the job finishes, the service writes a compressed
        archive ``output.tar.gz`` -- containing ``topic-terms.csv`` and
        ``doc-topics.csv`` -- under a job-specific directory at that location.
    :type DataAccessRoleArn: string
    :param DataAccessRoleArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the IAM role that grants Amazon
        Comprehend read access to your input data.
    :type JobName: string
    :param JobName:
        The identifier of the job.
    :type NumberOfTopics: integer
    :param NumberOfTopics:
        The number of topics to detect.
    :type ClientRequestToken: string
    :param ClientRequestToken:
        A unique identifier for the request. Generated by Amazon Comprehend
        if not provided (this field is autopopulated if not provided).
    :type VolumeKmsKeyId: string
    :param VolumeKmsKeyId:
        KMS key (key ID or key ARN) used to encrypt data on the storage
        volume attached to the ML compute instance(s) that process the job.
    :rtype: dict
    :returns:
        ``{'JobId': 'string', 'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED'}``
        -- use ``JobId`` with ``DescribeTopicDetectionJob`` to track progress.
    """
    pass
def stop_dominant_language_detection_job(self, JobId: str) -> Dict:
    """
    Stops a dominant language detection job in progress.

    If the job state is ``IN_PROGRESS`` the job is marked for termination and
    put into the ``STOP_REQUESTED`` state. If the job completes before it can
    be stopped, it is put into the ``COMPLETED`` state; otherwise the job is
    stopped and put into the ``STOPPED`` state. Calling this operation on a
    job already in the ``COMPLETED`` or ``FAILED`` state returns a 400
    Internal Request Exception. When a job is stopped, any documents already
    processed are written to the output location.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopDominantLanguageDetectionJob>`_

    :type JobId: string
    :param JobId: **[REQUIRED]**
        The identifier of the dominant language detection job to stop.
    :rtype: dict
    :returns:
        ``{'JobId': 'string', 'JobStatus': ...}`` where ``JobStatus`` is
        ``STOP_REQUESTED`` if the job was running, or ``STOPPED`` if it had
        already been stopped by an earlier call.
    """
    pass
def stop_entities_detection_job(self, JobId: str) -> Dict:
    """
    Stops an entities detection job in progress.

    If the job state is ``IN_PROGRESS`` the job is marked for termination and
    put into the ``STOP_REQUESTED`` state. If the job completes before it can
    be stopped, it is put into the ``COMPLETED`` state; otherwise the job is
    stopped and put into the ``STOPPED`` state. If the job is in the
    ``COMPLETED`` or ``FAILED`` state when you call the
    ``StopEntitiesDetectionJob`` operation, the operation returns a 400
    Internal Request Exception. When a job is stopped, any documents already
    processed are written to the output location.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopEntitiesDetectionJob>`_

    :type JobId: string
    :param JobId: **[REQUIRED]**
        The identifier of the entities detection job to stop.
    :rtype: dict
    :returns:
        ``{'JobId': 'string', 'JobStatus': ...}`` where ``JobStatus`` is
        ``STOP_REQUESTED`` if the job is currently running, or ``STOPPED`` if
        the job was previously stopped with this operation.
    """
    pass
def stop_key_phrases_detection_job(self, JobId: str) -> Dict:
    """
    Stops a key phrases detection job in progress.

    If the job state is ``IN_PROGRESS`` the job is marked for termination and
    put into the ``STOP_REQUESTED`` state. If the job completes before it can
    be stopped, it is put into the ``COMPLETED`` state; otherwise the job is
    stopped and put into the ``STOPPED`` state. If the job is in the
    ``COMPLETED`` or ``FAILED`` state when you call the
    ``StopKeyPhrasesDetectionJob`` operation, the operation returns a 400
    Internal Request Exception. When a job is stopped, any documents already
    processed are written to the output location.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopKeyPhrasesDetectionJob>`_

    :type JobId: string
    :param JobId: **[REQUIRED]**
        The identifier of the key phrases detection job to stop.
    :rtype: dict
    :returns:
        ``{'JobId': 'string', 'JobStatus': ...}`` where ``JobStatus`` is
        ``STOP_REQUESTED`` if the job is currently running, or ``STOPPED`` if
        the job was previously stopped with this operation.
    """
    pass
def stop_sentiment_detection_job(self, JobId: str) -> Dict:
    """
    Stops a sentiment detection job in progress.

    If the job state is ``IN_PROGRESS`` the job is marked for termination and
    put into the ``STOP_REQUESTED`` state. If the job completes before it can
    be stopped, it is put into the ``COMPLETED`` state; otherwise the job is
    stopped and put into the ``STOPPED`` state. If the job is in the
    ``COMPLETED`` or ``FAILED`` state when you call the
    ``StopSentimentDetectionJob`` operation, the operation returns a 400
    Internal Request Exception. When a job is stopped, any documents already
    processed are written to the output location.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopSentimentDetectionJob>`_

    :type JobId: string
    :param JobId: **[REQUIRED]**
        The identifier of the sentiment detection job to stop.
    :rtype: dict
    :returns:
        ``{'JobId': 'string', 'JobStatus': ...}`` where ``JobStatus`` is
        ``STOP_REQUESTED`` if the job is currently running, or ``STOPPED`` if
        the job was previously stopped with this operation.
    """
    pass
def stop_training_document_classifier(self, DocumentClassifierArn: str) -> Dict:
    """
    Stops a document classifier training job while in progress.

    If the training job state is ``TRAINING``, the job is marked for
    termination and put into the ``STOP_REQUESTED`` state. If the training job
    completes before it can be stopped, it is put into the ``TRAINED`` state;
    otherwise the training job is stopped and put into the ``STOPPED`` state
    and the service sends back an HTTP 200 response with an empty HTTP body.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopTrainingDocumentClassifier>`_

    :type DocumentClassifierArn: string
    :param DocumentClassifierArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) that identifies the document
        classifier currently being trained.
    :rtype: dict
    :returns: An empty dict on success.
    """
    pass
def stop_training_entity_recognizer(self, EntityRecognizerArn: str) -> Dict:
    """
    Stops an entity recognizer training job while in progress.

    If the training job state is ``TRAINING``, the job is marked for
    termination and put into the ``STOP_REQUESTED`` state. If the training job
    completes before it can be stopped, it is put into the ``TRAINED`` state;
    otherwise the training job is stopped and put into the ``STOPPED`` state
    and the service sends back an HTTP 200 response with an empty HTTP body.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/StopTrainingEntityRecognizer>`_

    :type EntityRecognizerArn: string
    :param EntityRecognizerArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) that identifies the entity recognizer
        currently being trained.
    :rtype: dict
    :returns: An empty dict on success.
    """
    pass
def tag_resource(self, ResourceArn: str, Tags: List) -> Dict:
    """
    Associates a specific tag with an Amazon Comprehend resource. A tag is a
    key-value pair added as metadata to a resource used by Amazon Comprehend;
    for example, a tag with "Sales" as the key might be added to a resource to
    indicate its use by the sales department.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/TagResource>`_

    :type ResourceArn: string
    :param ResourceArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the Amazon Comprehend resource to
        which you want to associate the tags.
    :type Tags: list
    :param Tags: **[REQUIRED]**
        List of ``{'Key': str (required), 'Value': str}`` pairs to associate
        with the resource. A resource can carry at most 50 tags (existing
        plus pending combined).
    :rtype: dict
    :returns: An empty dict on success.
    """
    pass
def untag_resource(self, ResourceArn: str, TagKeys: List) -> Dict:
    """
    Removes a specific tag associated with an Amazon Comprehend resource.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/UntagResource>`_

    :type ResourceArn: string
    :param ResourceArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the Amazon Comprehend resource from
        which you want to remove the tags.
    :type TagKeys: list
    :param TagKeys: **[REQUIRED]**
        The keys (the initial part of each key-value pair) of the tags to
        remove from the resource. Keys must be unique and cannot be
        duplicated for a particular resource.
    :rtype: dict
    :returns: An empty dict on success.
    """
    pass
| 65.339532
| 414
| 0.585588
| 26,199
| 231,890
| 5.158632
| 0.028818
| 0.012874
| 0.01212
| 0.01414
| 0.930922
| 0.916405
| 0.907851
| 0.903767
| 0.898514
| 0.893268
| 0
| 0.025505
| 0.325206
| 231,890
| 3,548
| 415
| 65.357948
| 0.838201
| 0.854026
| 0
| 0.462264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.462264
| false
| 0.462264
| 0.066038
| 0
| 0.537736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 10
|
1413e22684fcf53cff726acdcd9f9ab6e551b755
| 11,970
|
py
|
Python
|
pyapprox/tests/test_iterative_hard_thresholding.py
|
ConnectedSystems/pyapprox
|
4f405654c707cba83d211f327c0f0fdbc95efa29
|
[
"MIT"
] | 26
|
2019-12-16T02:21:15.000Z
|
2022-03-17T09:59:18.000Z
|
pyapprox/tests/test_iterative_hard_thresholding.py
|
ConnectedSystems/pyapprox
|
4f405654c707cba83d211f327c0f0fdbc95efa29
|
[
"MIT"
] | 9
|
2020-03-03T03:04:55.000Z
|
2021-08-19T22:50:42.000Z
|
pyapprox/tests/test_iterative_hard_thresholding.py
|
ConnectedSystems/pyapprox
|
4f405654c707cba83d211f327c0f0fdbc95efa29
|
[
"MIT"
] | 7
|
2020-03-02T03:49:17.000Z
|
2021-02-17T02:07:53.000Z
|
import unittest
from functools import partial
import copy
import numpy as np
from pyapprox.iterative_hard_thresholding import *
from pyapprox.function_train import *
from pyapprox.univariate_polynomials.orthonormal_recursions import \
jacobi_recurrence
class TestIHT(unittest.TestCase):
    """Tests for iterative hard thresholding (IHT) sparse recovery.

    NOTE(review): this block was recovered from a dump that stripped leading
    indentation; the structure below is reconstructed from the code's logic.
    """

    def test_gaussian_matrix(self):
        """Recover a random sparse vector observed through a Gaussian matrix.

        Builds a unit-norm solution with 3 nonzeros, measures it with a
        column-scaled Gaussian matrix, and checks IHT recovers it to within
        ten times the solver tolerance.
        """
        np.random.seed(3)
        num_samples = 30
        sparsity = 3
        num_terms = 30
        # Gaussian sensing matrix scaled by 1/sqrt(num_samples).
        Amatrix = np.random.normal(
            0., 1., (num_samples, num_terms))/np.sqrt(num_samples)
        true_sol = np.zeros((num_terms))
        # Random support of size `sparsity`, then normalize the solution.
        I = np.random.permutation(num_terms)[:sparsity]
        true_sol[I] = np.random.normal(0., 1., (sparsity))
        true_sol /= np.linalg.norm(true_sol)
        obs = np.dot(Amatrix, true_sol)

        # Forward map and (negated) adjoint-Jacobian application used by IHT.
        def approx_eval(x): return np.dot(Amatrix, x)

        def apply_approx_adjoint_jacobian(x, y): return -np.dot(Amatrix.T, y)
        project = partial(s_sparse_projection, sparsity=sparsity)
        initial_guess = np.zeros_like(true_sol)
        tol = 1e-5
        max_iter = 100
        result = iterative_hard_thresholding(
            approx_eval, apply_approx_adjoint_jacobian, project,
            obs, initial_guess, tol, max_iter)
        sol = result[0]
        assert np.allclose(true_sol, sol, atol=10*tol)

    def test_random_function_train(self):
        """Recover a sparse function train with IHT from random samples.

        Generates a random sparse function-train model, samples it, then runs
        IHT starting from a perturbation of the truth and checks the relative
        validation error against the solver tolerance.
        """
        np.random.seed(5)
        num_vars = 2
        degree = 5
        rank = 2
        sparsity_ratio = 0.2
        sample_ratio = .9
        ranks = rank*np.ones(num_vars+1, dtype=np.uint64)
        ranks[0] = 1
        ranks[-1] = 1
        # Legendre (alpha=beta=0) recursion coefficients, probability-normed.
        alpha = 0
        beta = 0
        recursion_coeffs = jacobi_recurrence(
            degree+1, alpha=alpha, beta=beta, probability=True)
        ft_data = generate_random_sparse_function_train(
            num_vars, rank, degree+1, sparsity_ratio)
        true_sol = ft_data[1]
        num_ft_params = true_sol.shape[0]
        num_samples = int(sample_ratio*num_ft_params)
        samples = np.random.uniform(-1., 1., (num_vars, num_samples))

        def function(samples): return evaluate_function_train(
            samples, ft_data, recursion_coeffs)
        values = function(samples)
        assert np.linalg.norm(values) > 0, (np.linalg.norm(values))
        num_validation_samples = 100
        validation_samples = np.random.uniform(
            -1., 1., (num_vars, num_validation_samples))
        validation_values = function(validation_samples)
        zero_ft_data = copy.deepcopy(ft_data)
        zero_ft_data[1] = np.zeros_like(zero_ft_data[1])
        # DO NOT use ft_data in following two functions.
        # These functions only overwrite parameters associated with the
        # active indices; the rest of the parameters are taken from ft_data.
        # If ft_data is used some of the true data will be kept and give
        # an unrealistically accurate answer.
        approx_eval = partial(modify_and_evaluate_function_train, samples,
                              zero_ft_data, recursion_coeffs, None)
        apply_approx_adjoint_jacobian = partial(
            apply_function_train_adjoint_jacobian, samples, zero_ft_data,
            recursion_coeffs, 1e-3)
        sparsity = np.where(true_sol != 0)[0].shape[0]
        print(('sparsity', sparsity, 'num_samples', num_samples))
        # sparse
        project = partial(s_sparse_projection, sparsity=sparsity)
        # non-linear least squares
        #project = partial(s_sparse_projection,sparsity=num_ft_params)
        # use uninformative initial guess
        #initial_guess = np.zeros_like(true_sol)
        # use linear approximation as initial guess
        linear_ft_data = ft_linear_least_squares_regression(
            samples, values, degree, perturb=None)
        initial_guess = linear_ft_data[1]
        # use initial guess that is close to true solution
        # num_samples required to obtain accurate answer decreases significantly
        # over linear or uninformative guesses. As size of perturbation from
        # truth increases num_samples must increase
        initial_guess = true_sol.copy()+np.random.normal(0., .1, (num_ft_params))
        tol = 5e-3
        max_iter = 1000
        result = iterative_hard_thresholding(
            approx_eval, apply_approx_adjoint_jacobian, project,
            values[:, 0], initial_guess, tol, max_iter, verbosity=1)
        sol = result[0]
        residnorm = result[1]
        recovered_ft_data = copy.deepcopy(ft_data)
        recovered_ft_data[1] = sol
        ft_validation_values = evaluate_function_train(
            validation_samples, recovered_ft_data, recursion_coeffs)
        validation_error = np.linalg.norm(
            validation_values-ft_validation_values)
        rel_validation_error = validation_error / \
            np.linalg.norm(validation_values)
        # compare relative error because exit condition is based upon
        # relative residual
        assert rel_validation_error < 10*tol, rel_validation_error
        # interestingly enough the error in the function can be low
        # but the error in the ft parameters can be large
        # assert np.allclose(true_sol,sol,atol=10*tol)
class TestOMP(unittest.TestCase):
    """Tests for orthogonal matching pursuit (OMP) sparse recovery.

    NOTE(review): this block was recovered from a dump that stripped leading
    indentation; the structure below is reconstructed from the code's logic.
    """

    def test_gaussian_matrix(self):
        """Recover a random 5-sparse vector via OMP from Gaussian measurements."""
        num_samples = 30
        sparsity = 5
        num_terms = 30
        # Gaussian sensing matrix scaled by 1/sqrt(num_samples).
        Amatrix = np.random.normal(
            0., 1., (num_samples, num_terms))/np.sqrt(num_samples)
        true_sol = np.zeros((num_terms))
        I = np.random.permutation(num_terms)[:sparsity]
        true_sol[I] = np.random.normal(0., 1., (sparsity))
        true_sol /= np.linalg.norm(true_sol)
        obs = np.dot(Amatrix, true_sol)

        # Forward map and (negated) adjoint-Jacobian application used by OMP.
        def approx_eval(x): return np.dot(Amatrix, x)

        def apply_approx_adjoint_jacobian(x, y): return -np.dot(Amatrix.T, y)
        # Restricted least-squares solve on the active column set.
        least_squares_regression = \
            lambda indices, initial_guess: np.linalg.lstsq(
                Amatrix[:, indices], obs, rcond=None)[0]
        initial_guess = np.zeros_like(true_sol)
        tol = 1e-5
        active_indices = None
        result = orthogonal_matching_pursuit(
            approx_eval, apply_approx_adjoint_jacobian,
            least_squares_regression,
            obs, active_indices, num_terms, tol, sparsity)
        sol = result[0]
        assert np.allclose(true_sol, sol, atol=10*tol)

    def test_gaussian_matrix_with_initial_active_indices(self):
        """Same as test_gaussian_matrix, but warm-started with part of the support."""
        num_samples = 30
        sparsity = 5
        num_terms = 30
        Amatrix = np.random.normal(
            0., 1., (num_samples, num_terms))/np.sqrt(num_samples)
        true_sol = np.zeros((num_terms))
        I = np.random.permutation(num_terms)[:sparsity]
        true_sol[I] = np.random.normal(0., 1., (sparsity))
        true_sol /= np.linalg.norm(true_sol)
        obs = np.dot(Amatrix, true_sol)

        def approx_eval(x): return np.dot(Amatrix, x)

        def apply_approx_adjoint_jacobian(x, y): return -np.dot(Amatrix.T, y)
        least_squares_regression = \
            lambda indices, initial_guess: np.linalg.lstsq(
                Amatrix[:, indices], obs, rcond=None)[0]
        initial_guess = np.zeros_like(true_sol)
        tol = 1e-5
        # use first three sparse terms
        active_indices = I[:3]
        result = orthogonal_matching_pursuit(
            approx_eval, apply_approx_adjoint_jacobian,
            least_squares_regression,
            obs, active_indices, num_terms, tol, sparsity)
        sol = result[0]
        assert np.allclose(true_sol, sol, atol=10*tol)

    def test_sparse_function_train(self):
        """Recover a sparse function train with OMP plus nonlinear least squares."""
        np.random.seed(5)
        num_vars = 2
        degree = 5
        rank = 2
        tol = 1e-5
        sparsity_ratio = 0.2
        sample_ratio = 0.6
        ranks = rank*np.ones(num_vars+1, dtype=np.uint64)
        ranks[0] = 1
        ranks[-1] = 1
        # Legendre (alpha=beta=0) recursion coefficients, probability-normed.
        alpha = 0
        beta = 0
        recursion_coeffs = jacobi_recurrence(
            degree+1, alpha=alpha, beta=beta, probability=True)
        ft_data = generate_random_sparse_function_train(
            num_vars, rank, degree+1, sparsity_ratio)
        true_sol = ft_data[1]
        num_ft_params = true_sol.shape[0]
        num_samples = int(sample_ratio*num_ft_params)
        samples = np.random.uniform(-1., 1., (num_vars, num_samples))

        def function(samples): return evaluate_function_train(
            samples, ft_data, recursion_coeffs)
        #function = lambda samples: np.cos(samples.sum(axis=0))[:,np.newaxis]
        values = function(samples)
        print(values.shape)
        assert np.linalg.norm(values) > 0, (np.linalg.norm(values))
        num_validation_samples = 100
        validation_samples = np.random.uniform(
            -1., 1., (num_vars, num_validation_samples))
        validation_values = function(validation_samples)
        zero_ft_data = copy.deepcopy(ft_data)
        zero_ft_data[1] = np.zeros_like(zero_ft_data[1])
        # DO NOT use ft_data in following two functions.
        # These functions only overwrite parameters associated with the
        # active indices; the rest of the parameters are taken from ft_data.
        # If ft_data is used some of the true data will be kept and give
        # an unrealistically accurate answer.
        approx_eval = partial(modify_and_evaluate_function_train, samples,
                              zero_ft_data, recursion_coeffs, None)
        apply_approx_adjoint_jacobian = partial(
            apply_function_train_adjoint_jacobian, samples, zero_ft_data,
            recursion_coeffs, 1e-3)

        def least_squares_regression(indices, initial_guess):
            # Deterministic small random initial guess: save and restore the
            # global RNG state so the surrounding test stream is unaffected.
            # if initial_guess is None:
            st0 = np.random.get_state()
            np.random.seed(1)
            initial_guess = np.random.normal(0., .01, indices.shape[0])
            np.random.set_state(st0)
            result = ft_non_linear_least_squares_regression(
                samples, values, ft_data, recursion_coeffs, initial_guess,
                indices, {'gtol': tol, 'ftol': tol, 'xtol': tol, 'verbosity': 0})
            return result[indices]
        sparsity = np.where(true_sol != 0)[0].shape[0]
        print(('sparsity', sparsity, 'num_samples', num_samples,
               'num_ft_params', num_ft_params))
        print(true_sol)
        active_indices = None
        use_omp = True
        #use_omp = False
        if not use_omp:
            sol = least_squares_regression(np.arange(num_ft_params), None)
        else:
            result = orthogonal_matching_pursuit(
                approx_eval, apply_approx_adjoint_jacobian,
                least_squares_regression, values[:, 0], active_indices,
                num_ft_params, tol, min(num_samples, num_ft_params), verbosity=1)
            sol = result[0]
            residnorm = result[1]
        recovered_ft_data = copy.deepcopy(ft_data)
        recovered_ft_data[1] = sol
        ft_validation_values = evaluate_function_train(
            validation_samples, recovered_ft_data, recursion_coeffs)
        validation_error = np.linalg.norm(
            validation_values-ft_validation_values)
        rel_validation_error = validation_error / \
            np.linalg.norm(validation_values)
        # compare relative error because exit condition is based upon
        # relative residual
        print(rel_validation_error)
        assert rel_validation_error < 100*tol, rel_validation_error
        # interestingly enough the error in the function can be low
        # but the error in the ft parameters can be large
        # print np.where(true_sol!=0)[0]
        # print np.where(sol!=0)[0]
        #assert np.allclose(true_sol,sol,atol=100*tol)
if __name__ == "__main__":
    # unittest.main() discovers TestIHT and TestOMP in this module on its
    # own. The explicit TestSuite objects previously built here were never
    # used (the TextTestRunner invocations were commented out), so that dead
    # code has been removed.
    unittest.main()
| 38
| 81
| 0.642523
| 1,524
| 11,970
| 4.782152
| 0.14895
| 0.028814
| 0.018112
| 0.035675
| 0.800768
| 0.760977
| 0.732437
| 0.70472
| 0.700467
| 0.695664
| 0
| 0.019006
| 0.270343
| 11,970
| 314
| 82
| 38.121019
| 0.815434
| 0.145447
| 0
| 0.743119
| 0
| 0
| 0.007851
| 0
| 0
| 0
| 0
| 0
| 0.03211
| 1
| 0.06422
| false
| 0
| 0.03211
| 0.036697
| 0.110092
| 0.022936
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
142adcf9ba0fa0e19a9d1a229ad61f1a66176627
| 803
|
py
|
Python
|
DataProcess/ClearText/zhline.py
|
yongfang117/data_process
|
c77af1b336ec8b7f61b538ea43dd03ee005a5227
|
[
"MIT"
] | null | null | null |
DataProcess/ClearText/zhline.py
|
yongfang117/data_process
|
c77af1b336ec8b7f61b538ea43dd03ee005a5227
|
[
"MIT"
] | null | null | null |
DataProcess/ClearText/zhline.py
|
yongfang117/data_process
|
c77af1b336ec8b7f61b538ea43dd03ee005a5227
|
[
"MIT"
] | null | null | null |
# coding=utf8
"""Demo of zhtools.langconv: round-trip a news snippet between Traditional
and Simplified Chinese with ``Converter``."""
from zhtools.langconv import *
# Convert Traditional Chinese to Simplified Chinese.
str1 = '上港5-4恒大5分领跑剑指冠军,下轮打平便可夺冠,武磊平纪录—广州恒大淘宝 上海上港 蔡慧康 武磊 胡尔克 张成林 阿兰 保利尼奥 王燊超 吕文君 懂球帝北京时间11月3日19:35,中超第28轮迎来天王山之战,广州恒大淘宝坐镇主场迎战上海上港。上半场吕文君和蔡慧康先后进球两度为上港取得领先,保利尼奥和阿兰两度为恒大将比分扳平,补时阶段保利尼奥进球反超比分,下半场武磊进球追平李金羽单赛季进球纪录,王燊超造成张成林乌龙,胡尔克点射破门,阿兰补时打进点球。最终,上海上港客场5-4战胜广州恒大淘宝,赛季双杀恒大同时也将积分榜上的领先优势扩大到五分,上港下轮只要战平就将夺得冠军。'
line1 = Converter('zh-hans').convert(str1)
print('繁体->简体:\n',line1)
# Convert Simplified Chinese to Traditional Chinese.
str2 =r'上港5-4恒大5分领跑剑指冠军,下轮打平便可夺冠,武磊平纪录—广州恒大淘宝 上海上港 蔡慧康 武磊 胡尔克 张成林 阿兰 保利尼奥 王燊超 吕文君 懂球帝北京时间11月3日19:35,中超第28轮迎来天王山之战,广州恒大淘宝坐镇主场迎战上海上港。上半场吕文君和蔡慧康先后进球两度为上港取得领先,保利尼奥和阿兰两度为恒大将比分扳平,补时阶段保利尼奥进球反超比分,下半场武磊进球追平李金羽单赛季进球纪录,王燊超造成张成林乌龙,胡尔克点射破门,阿兰补时打进点球。最终,上海上港客场5-4战胜广州恒大淘宝,赛季双杀恒大同时也将积分榜上的领先优势扩大到五分,上港下轮只要战平就将夺得冠军。'
line2 = Converter('zh-hant').convert(str2)
print('简体->繁体:\n',line2)
| 53.533333
| 298
| 0.83188
| 95
| 803
| 7.052632
| 0.557895
| 0.041791
| 0.065672
| 0.080597
| 0.776119
| 0.776119
| 0.776119
| 0.776119
| 0.776119
| 0.776119
| 0
| 0.048942
| 0.058531
| 803
| 14
| 299
| 57.357143
| 0.834656
| 0.033624
| 0
| 0
| 0
| 0.285714
| 0.790155
| 0.65285
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1465c19b06734b74fe63674d54f8fb6c492420da
| 80
|
py
|
Python
|
api/base/__init__.py
|
felliott/SHARE
|
8fd60ff4749349c9b867f6188650d71f4f0a1a56
|
[
"Apache-2.0"
] | 87
|
2015-01-06T18:24:45.000Z
|
2021-08-08T07:59:40.000Z
|
api/base/__init__.py
|
fortress-biotech/SHARE
|
9c5a05dd831447949fa6253afec5225ff8ab5d4f
|
[
"Apache-2.0"
] | 442
|
2015-01-01T19:16:01.000Z
|
2022-03-30T21:10:26.000Z
|
api/base/__init__.py
|
fortress-biotech/SHARE
|
9c5a05dd831447949fa6253afec5225ff8ab5d4f
|
[
"Apache-2.0"
] | 67
|
2015-03-10T16:32:58.000Z
|
2021-11-12T16:33:41.000Z
|
# Re-export every public name from the serializers and views submodules so
# they are importable directly from ``api.base``. The star imports are
# deliberate here (package facade); ``noqa`` silences the lint warning.
from api.base.serializers import * # noqa
from api.base.views import * # noqa
| 26.666667
| 42
| 0.725
| 12
| 80
| 4.833333
| 0.583333
| 0.241379
| 0.37931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 80
| 2
| 43
| 40
| 0.878788
| 0.1125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
148e336e2aec475523387e6f42159267cfa873ba
| 2,939
|
py
|
Python
|
src/mnist_nn.py
|
euske/introdl
|
f6d9da71c7172952e9b5872502293dbb41eb7d93
|
[
"CC-BY-4.0"
] | 14
|
2022-03-07T02:34:18.000Z
|
2022-03-23T06:34:54.000Z
|
src/mnist_nn.py
|
euske/introdl
|
f6d9da71c7172952e9b5872502293dbb41eb7d93
|
[
"CC-BY-4.0"
] | null | null | null |
src/mnist_nn.py
|
euske/introdl
|
f6d9da71c7172952e9b5872502293dbb41eb7d93
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
from nn_numpy import Layer
from nn_numpy import SoftmaxLayer
from mnist import load_mnist
# Fix the global NumPy RNG seed so the random weight initialization in the
# training runs below is reproducible.
np.random.seed(1)
def main1():
    """Train a two-layer (784-100-10) network on MNIST with an MSE loss and
    report the number of correctly classified test images.

    NOTE(review): this block was recovered from a dump that stripped leading
    indentation; the loop/branch structure below is a reconstruction --
    confirm against the upstream source.
    """
    # Load the training images and labels (adjust the path names as needed).
    train_images = load_mnist('./MNIST/train-images-idx3-ubyte.gz')
    train_labels = load_mnist('./MNIST/train-labels-idx1-ubyte.gz')
    # Create the two layers.
    layer1 = Layer(784, 100)
    layer2 = Layer(100, 10)
    n = 0
    for i in range(1):
        for (image,label) in zip(train_images, train_labels):
            # Flatten the 28x28 image into a 784-element array in [0, 1].
            x = (image/255).reshape(784)
            # Build a 10-element array with 1 only at the correct label.
            ya = np.zeros(10)
            ya[label] = 1
            # Train on this sample: forward pass, MSE loss, backpropagation.
            y = layer1.forward(x)
            y = layer2.forward(y)
            delta = layer2.mse_loss(ya)
            delta = layer2.backward(delta)
            delta = layer1.backward(delta)
            n += 1
            if (n % 50 == 0):
                print(n, layer2.loss)
            # NOTE(review): updates are assumed to run once per sample (plain
            # SGD) rather than inside the reporting branch above -- confirm.
            layer1.update(0.01)
            layer2.update(0.01)
    # Evaluate on the test set: count argmax predictions matching the label.
    test_images = load_mnist('./MNIST/t10k-images-idx3-ubyte.gz')
    test_labels = load_mnist('./MNIST/t10k-labels-idx1-ubyte.gz')
    correct = 0
    for (image,label) in zip(test_images, test_labels):
        x = (image/255).flatten()
        y = layer1.forward(x)
        y = layer2.forward(y)
        i = np.argmax(y)
        if i == label:
            correct += 1
    print(correct, len(test_images))
def main2(epochs=1, lr=0.01):
    """Train a three-layer network (784-100-100-10) on MNIST with a
    softmax output and cross-entropy loss, then print test-set accuracy.

    Args:
        epochs: number of passes over the training data (previously
            hard-coded to 1).
        lr: learning rate passed to each layer's update() (previously
            hard-coded to 0.01).

    Side effects: prints the running loss every 50 samples and the final
    (correct, total) counts on the test set.
    """
    # Load the training images/labels (adjust the paths as needed).
    train_images = load_mnist('./MNIST/train-images-idx3-ubyte.gz')
    train_labels = load_mnist('./MNIST/train-labels-idx1-ubyte.gz')
    # Create the three layers: 784 -> 100 -> 100 -> 10 (softmax output).
    layer1 = Layer(784, 100)
    layerx = Layer(100, 100)
    layer2 = SoftmaxLayer(100, 10)
    n = 0
    # Loop variable renamed from `i`: it was unused and was later shadowed
    # by the argmax index in the evaluation loop below.
    for _ in range(epochs):
        for (image, label) in zip(train_images, train_labels):
            # Flatten the 28x28 image into a 784-element vector in [0, 1].
            x = (image / 255).reshape(784)
            # One-hot target: 1 at the correct digit, 0 elsewhere.
            ya = np.zeros(10)
            ya[label] = 1
            # Forward pass; cross_entropy_loss_backward combines the loss
            # gradient with the softmax layer's backward step.
            y = layer1.forward(x)
            y = layerx.forward(y)
            y = layer2.forward(y)
            delta = layer2.cross_entropy_loss_backward(ya)
            delta = layerx.backward(delta)
            delta = layer1.backward(delta)
            n += 1
            if (n % 50 == 0):
                # Every 50 samples: report the loss and apply the
                # accumulated gradients (mini-batch of 50).
                print(n, layer2.loss)
                layer1.update(lr)
                layerx.update(lr)
                layer2.update(lr)
    # Evaluate on the held-out test set.
    test_images = load_mnist('./MNIST/t10k-images-idx3-ubyte.gz')
    test_labels = load_mnist('./MNIST/t10k-labels-idx1-ubyte.gz')
    correct = 0
    for (image, label) in zip(test_images, test_labels):
        # Same preprocessing as training (was inconsistently .flatten()).
        x = (image / 255).reshape(784)
        y = layer1.forward(x)
        y = layerx.forward(y)
        y = layer2.forward(y)
        # Predicted digit = index of the largest output.
        if np.argmax(y) == label:
            correct += 1
    print(correct, len(test_images))
main2()
| 33.022472
| 67
| 0.553249
| 382
| 2,939
| 4.180628
| 0.209424
| 0.05072
| 0.070132
| 0.050094
| 0.81841
| 0.81841
| 0.804634
| 0.804634
| 0.795867
| 0.795867
| 0
| 0.070894
| 0.318476
| 2,939
| 88
| 68
| 33.397727
| 0.724413
| 0.074175
| 0
| 0.783784
| 0
| 0
| 0.098893
| 0.098893
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.054054
| 0
| 0.081081
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
14bb5066551cc8af8bcf666702b096f6f441eb98
| 46,303
|
py
|
Python
|
AutotestWebD/all_models_for_dubbo/migrations/0001_initial.py
|
ltx100/sosotest_opensource
|
57f2312bd4c43575046b0d2763ab5621af4144dd
|
[
"MIT"
] | 1
|
2022-03-31T02:41:53.000Z
|
2022-03-31T02:41:53.000Z
|
AutotestWebD/all_models_for_dubbo/migrations/0001_initial.py
|
ltx100/sosotest_opensource
|
57f2312bd4c43575046b0d2763ab5621af4144dd
|
[
"MIT"
] | null | null | null |
AutotestWebD/all_models_for_dubbo/migrations/0001_initial.py
|
ltx100/sosotest_opensource
|
57f2312bd4c43575046b0d2763ab5621af4144dd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2019-04-15 14:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('all_models', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tb0ErrorLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='UNTITLED', max_length=100, verbose_name='Log标题')),
('errorLogText', models.TextField(db_column='errorLogText', default='', verbose_name='Log的文本')),
('logLevel', models.IntegerField(default=10, verbose_name='级别')),
('state', models.IntegerField(choices=[(1, '未解决'), (0, '已解决')], default=1, verbose_name='状态 0已解决 1未解决')),
('addBy', models.CharField(db_column='addBy', default=None, max_length=25, null=True, verbose_name='添加者登录名')),
('modBy', models.CharField(db_column='modBy', default=None, max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
],
options={
'verbose_name': '00系统错误日志表',
'verbose_name_plural': '00系统错误日志表',
'db_table': 'tb0_error_log',
},
),
migrations.CreateModel(
name='Tb2DubboBatchTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('businessLine', models.CharField(db_column='businessLine', max_length=200, verbose_name='业务线')),
('httpConfKey', models.CharField(db_column='httpConfKey', max_length=20, verbose_name='执行环境的httpConfKey')),
('taskLevel', models.IntegerField(db_column='taskLevel', default='9', verbose_name='任务优先级,0高,5中,9低')),
('caseLevel', models.IntegerField(db_column='caseLevel', verbose_name='执行任务中case的优先级,0高,5中,9低')),
('taskIdList', models.TextField(db_column='taskIdList', verbose_name='本次批量执行哪些任务')),
('status', models.IntegerField(db_column='status', verbose_name='执行状态: NOTRUN = 1 RUNNING = 2 DONE = 3 EXCEPTION = 4 CANCELING = 10 CANCELED = 11')),
('isSendEmail', models.IntegerField(db_column='isSendEmail', default=0, verbose_name='是否发送邮件[是否发送:是否带附件:PASS是否发送:FAIL是否发送:ERROR是否发送:EXCEPTION是否发送]0的时候不发送,1开头的时候依次往后判断即可后面没有的都是1,例如11标识发送带附件所有情况都发送10标识发送不带附件所有情况都发送100标识发送不带附件成功不发送其他情况发送')),
('isCodeRate', models.IntegerField(db_column='isCodeRate', default=0, verbose_name='是否生成代码覆盖率 1生成 0不生成')),
('isSaveHistory', models.IntegerField(db_column='isSaveHistory', default=0, verbose_name='是否保存到历史记录 1保存 0不保存')),
('testResult', models.CharField(db_column='testResult', default='NOTRUN', max_length=20, verbose_name='测试结果 根据断言结果生成的测试结果 PASS/FAIL/ERROR/EXCEPTION/CANCEL')),
('executeMsg', models.TextField(db_column='executeMsg', default='[]', verbose_name='测试过程中产生的信息')),
('version', models.CharField(db_column='version', default='CurrentVersion', max_length=25, verbose_name='执行的版本')),
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('addBy', models.CharField(db_column='addBy', max_length=25, verbose_name='创建者登录名')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
],
options={
'verbose_name': '914DUBBO任务批量执行',
'verbose_name_plural': '914DUBBO任务批量执行',
'db_table': 'tb2_dubbo_batch_execute_task',
},
),
migrations.CreateModel(
name='Tb2DubboInterface',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('interfaceId', models.CharField(db_column='interfaceId', max_length=25, unique=True, verbose_name='接口ID,例如DUBBO_INTERFACE_1')),
('title', models.CharField(max_length=100, verbose_name='标题')),
('casedesc', models.TextField(default='', verbose_name='描述')),
('caselevel', models.IntegerField(default=5, verbose_name='用例优先级,数字越小,优先级越高,从0-9。 0高 5中 9低')),
('status', models.IntegerField(default=2, verbose_name='用例状态,1新建待审核 2审核通过 3审核未通过')),
('caseType', models.IntegerField(default=2, verbose_name='用例类型,0测试用例,不计入统计,不进入任务,1 接口计入统计 2接口步骤均计入统计 3步骤计入统计')),
('varsPre', models.TextField(db_column='varsPre', default='', verbose_name='前置变量')),
('dubboSystem', models.CharField(db_column='dubboSystem', max_length=100, verbose_name='dubbo的project名称,比如mls-biz-support')),
('dubboService', models.CharField(db_column='dubboService', max_length=200, verbose_name='dubbo的service全路径,比如com.lianjia.mls.business.quality.facade.SharePoolHouseFacade')),
('dubboMethod', models.CharField(db_column='dubboMethod', max_length=100, verbose_name='dubbo的service中的具体method')),
('dubboParams', models.TextField(verbose_name='Dubbo invoke时请求的参数,多个params中间用半角逗号间隔')),
('encoding', models.CharField(db_column='encoding', default='gb18030', max_length=10, verbose_name='dubbo的service中的编码方式')),
('timeout', models.IntegerField(default=20, verbose_name='超时时间,单位秒')),
('varsPost', models.TextField(db_column='varsPost', verbose_name='后置变量')),
('state', models.IntegerField(choices=[(1, '有效'), (0, '无效')], default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboInterfaceAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('businessLineId', models.ForeignKey(db_column='businessLineId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbBusinessLine', verbose_name='业务线ID')),
('moduleId', models.ForeignKey(db_column='moduleId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbModules', verbose_name='模块ID')),
],
options={
'db_table': 'tb2_dubbo_interface',
},
),
migrations.CreateModel(
name='Tb2DubboInterfaceDebug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('interfaceId', models.CharField(db_column='interfaceId', max_length=25, verbose_name='接口ID,例如DUBBO_INTERFACE_1')),
('title', models.CharField(max_length=100, verbose_name='标题')),
('casedesc', models.TextField(default='', verbose_name='描述')),
('caselevel', models.IntegerField(default=5, verbose_name='用例优先级,数字越小,优先级越高,从0-9。 0高 5中 9低')),
('status', models.IntegerField(default=2, verbose_name='用例状态,1新建待审核 2审核通过 3审核未通过')),
('caseType', models.IntegerField(default=2, verbose_name='用例类型,0测试用例,不计入统计,不进入任务,1 接口计入统计 2接口步骤均计入统计 3步骤计入统计')),
('varsPre', models.TextField(db_column='varsPre', default='', verbose_name='前置变量')),
('dubboSystem', models.CharField(db_column='dubboSystem', max_length=100, verbose_name='dubbo的project名称,比如mls-biz-support')),
('dubboService', models.CharField(db_column='dubboService', max_length=200, verbose_name='dubbo的service全路径,比如com.lianjia.mls.business.quality.facade.SharePoolHouseFacade')),
('dubboMethod', models.CharField(db_column='dubboMethod', max_length=100, verbose_name='dubbo的service中的具体method')),
('dubboParams', models.TextField(verbose_name='Dubbo invoke时请求的参数,多个params中间用半角逗号间隔')),
('encoding', models.CharField(db_column='encoding', default='gb18030', max_length=10, verbose_name='dubbo的service中的编码方式')),
('timeout', models.IntegerField(default=20, verbose_name='超时时间,单位秒')),
('varsPost', models.TextField(db_column='varsPost', verbose_name='后置变量')),
('execStatus', models.IntegerField(db_column='execStatus', default=1, verbose_name='执行状态')),
('actualResult', models.TextField(blank=True, db_column='actualResult', default='', verbose_name='实际结果')),
('assertResult', models.TextField(blank=True, db_column='assertResult', default='', verbose_name='断言结果')),
('testResult', models.CharField(db_column='testResult', default='NOTRUN', max_length=20, verbose_name='执行结果')),
('beforeExecuteTakeTime', models.IntegerField(db_column='beforeExecuteTakeTime', default=0, verbose_name='执行前耗时')),
('afterExecuteTakeTime', models.IntegerField(db_column='afterExecuteTakeTime', default=0, verbose_name='执行后耗时')),
('executeTakeTime', models.IntegerField(db_column='executeTakeTime', default=0, verbose_name='执行耗时')),
('totalTakeTime', models.IntegerField(db_column='totalTakeTime', default=0, verbose_name='总耗时')),
('version', models.CharField(db_column='version', default='CurrentVersion', max_length=25, verbose_name='执行的版本')),
('state', models.IntegerField(choices=[(1, '有效'), (0, '无效')], default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboInterfaceDebugAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('businessLineId', models.ForeignKey(db_column='businessLineId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbBusinessLine', verbose_name='业务线ID')),
('httpConfKey', models.ForeignKey(db_column='httpConfKey', max_length=20, on_delete=django.db.models.deletion.CASCADE, to='all_models.TbConfigHttp', to_field='httpConfKey', verbose_name='执行环境的httpConfKey')),
('moduleId', models.ForeignKey(db_column='moduleId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbModules', verbose_name='模块ID')),
],
options={
'db_table': 'tb2_dubbo_interface_debug',
},
),
migrations.CreateModel(
name='Tb2DubboInterfaceExecuteHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('interfaceUrl', models.CharField(db_column='interfaceUrl', max_length=200, verbose_name='请求的接口URL')),
('requestHost', models.CharField(db_column='requestHost', max_length=200, verbose_name='请求的主机地址,例如HTTP://test.domain.com')),
('totalCount', models.IntegerField(db_column='totalCount', verbose_name='共执行次数统计')),
('passCount', models.IntegerField(db_column='passCount', verbose_name='通过次数统计')),
('failCount', models.IntegerField(db_column='failCount', verbose_name='失败次数统计')),
('errorCount', models.IntegerField(db_column='errorCount', verbose_name='错误次数统计')),
('exceptionCount', models.IntegerField(db_column='exceptionCount', verbose_name='异常次数统计')),
('taskId', models.CharField(db_column='taskId', max_length=25, verbose_name='执行的任务ID')),
('title', models.CharField(max_length=100, verbose_name='任务标题')),
('taskdesc', models.CharField(max_length=1000, verbose_name='任务描述')),
('protocol', models.CharField(max_length=20, verbose_name='任务协议')),
('testReportUrl', models.CharField(db_column='testReportUrl', max_length=200, verbose_name='报告路径')),
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboInterfaceExecuteHistoryAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('execBy', models.ForeignKey(blank=True, db_column='execBy', default='', max_length=30, on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboInterfaceExecuteHistoryExecBy', to='all_models.TbUser', to_field='loginName', verbose_name='执行人登录用户名')),
('httpConfKey', models.ForeignKey(db_column='httpConfKey', max_length=20, on_delete=django.db.models.deletion.CASCADE, to='all_models.TbConfigHttp', to_field='httpConfKey', verbose_name='执行环境的httpConfKey')),
],
options={
'verbose_name': '913DUBBO任务接口执行历史',
'verbose_name_plural': '913DUBBO任务接口执行历史',
'db_table': 'tb2_dubbo_interface_execute_history',
},
),
migrations.CreateModel(
name='Tb2DubboQuickDebug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('requestAddr', models.CharField(db_column='requestAddr', max_length=200, verbose_name='快速调试的请求地址')),
('dubboService', models.CharField(db_column='dubboService', max_length=200, verbose_name='dubbo的service全路径,比如com.lianjia.mls.business.quality.facade.SharePoolHouseFacade')),
('dubboMethod', models.CharField(db_column='dubboMethod', max_length=100, verbose_name='dubbo的service中的具体method')),
('dubboParams', models.TextField(verbose_name='Dubbo invoke时请求的参数,多个params中间用半角逗号间隔')),
('encoding', models.CharField(db_column='encoding', default='gb18030', max_length=10, verbose_name='dubbo的service中的编码方式')),
('actualResult', models.TextField(blank=True, db_column='actualResult', default='', verbose_name='实际结果')),
('executeTakeTime', models.IntegerField(db_column='executeTakeTime', default=0, verbose_name='执行耗时')),
('state', models.IntegerField(choices=[(1, '有效'), (0, '无效')], default=1, verbose_name='状态 0删除 1有效')),
('addBy', models.CharField(db_column='addBy', max_length=25, null=True, verbose_name='创建者登录名')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
],
options={
'db_table': 'tb2_dubbo_quick_debug',
},
),
migrations.CreateModel(
name='Tb2DubboTask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('taskId', models.CharField(db_column='taskId', max_length=25, unique=True, verbose_name='任务ID')),
('title', models.CharField(max_length=100, verbose_name='任务标题')),
('taskdesc', models.CharField(max_length=1000, verbose_name='任务描述')),
('protocol', models.CharField(default='DUBBO', max_length=20, verbose_name='任务协议')),
('businessLineGroup', models.CharField(db_column='businessLineGroup', max_length=1000, verbose_name='任务包含的业务线名称,例如 SFA,服务云')),
('modulesGroup', models.CharField(db_column='modulesGroup', max_length=1000, verbose_name='任务包含的模块名称,例如 合同,订单')),
('emailList', models.CharField(db_column='emailList', default='', max_length=2000, verbose_name='发送邮件列表,除却执行人execBy以外的其他收件人')),
('taskLevel', models.IntegerField(db_column='taskLevel', default=5, verbose_name='优先级,数字越小,优先级越高,从0-9。 0高 5中 9低')),
('highPriorityVARS', models.TextField(db_column='highPriorityVARS', default='', verbose_name='高优先级变量,执行时覆盖同名的变量和全局变量')),
('status', models.IntegerField(default=2, verbose_name='状态,1新建待审核 2审核通过 3审核未通过')),
('interfaceCount', models.IntegerField(db_column='interfaceCount', verbose_name='任务中的接口数量统计')),
('taskInterfaces', models.TextField(db_column='taskInterfaces', verbose_name='任务中的接口列表,多个接口用,间隔,例如 HTTP_INTERFACE_1,HTTP_INTERFACE_2')),
('caseCount', models.IntegerField(db_column='caseCount', verbose_name='任务中的用例数量统计')),
('taskTestcases', models.TextField(db_column='taskTestcases', verbose_name='任务中的用例列表,多个接口用,间隔,例如 HTTP_TESTCASE_1,HTTP_TESTCASE_2')),
('interfaceNum', models.IntegerField(db_column='interfaceNum', verbose_name='任务总的接口数量,包含接口的和用例中的步骤数量')),
('isCI', models.IntegerField(db_column='isCI', default=1, verbose_name='是否加入到持续集成 0 不加人 1加入')),
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboTaskAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
],
options={
'verbose_name': '911DUBBO任务表',
'verbose_name_plural': '911DUBBO任务表',
'db_table': 'tb2_dubbo_task',
},
),
migrations.CreateModel(
name='Tb2DubboTaskExecute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('taskId', models.CharField(db_column='taskId', max_length=100, verbose_name='要执行的任务ID')),
('title', models.CharField(max_length=100, verbose_name='任务标题')),
('taskdesc', models.CharField(max_length=1000, verbose_name='任务描述')),
('protocol', models.CharField(max_length=20, verbose_name='任务协议')),
('businessLineGroup', models.CharField(db_column='businessLineGroup', max_length=1000, verbose_name='任务包含的业务线名称,例如 SFA,服务云')),
('modulesGroup', models.CharField(db_column='modulesGroup', max_length=1000, verbose_name='任务包含的模块名称,例如 合同,订单')),
('taskLevel', models.IntegerField(db_column='taskLevel', default=5, verbose_name='优先级,数字越小,优先级越高,从0-9。 0高 5中 9低')),
('status', models.IntegerField(default=2, verbose_name='状态,1新建待审核 2审核通过 3审核未通过')),
('highPriorityVARS', models.TextField(db_column='highPriorityVARS', default='', verbose_name='高优先级变量,执行时覆盖同名的变量和全局变量')),
('interfaceCount', models.IntegerField(db_column='interfaceCount', verbose_name='任务中的接口数量统计')),
('taskInterfaces', models.TextField(db_column='taskInterfaces', verbose_name='任务中的接口列表,多个接口用,间隔,例如 HTTP_INTERFACE_1,HTTP_INTERFACE_2')),
('caseCount', models.IntegerField(db_column='caseCount', verbose_name='任务中的用例数量统计')),
('taskTestcases', models.TextField(db_column='taskTestcases', verbose_name='任务中的用例列表,多个接口用,间隔,例如 HTTP_TESTCASE_1,HTTP_TESTCASE_2')),
('interfaceNum', models.IntegerField(db_column='interfaceNum', verbose_name='任务总的接口数量,包含接口的和用例中的步骤数量')),
('isCI', models.IntegerField(db_column='isCI', default=1, verbose_name='是否加入到持续集成 0 不加人 1加入')),
('caseLevel', models.IntegerField(db_column='caseLevel', default=100, verbose_name='执行时选择的执行优先级,如果选择了,那么只有同等优先级的case会执行,0高 5中 9低')),
('isSendEmail', models.IntegerField(db_column='isSendEmail', default=0, verbose_name='是否发送邮件[是否发送:是否带附件:PASS是否发送:FAIL是否发送:ERROR是否发送:EXCEPTION是否发送]0的时候不发送,1开头的时候依次往后判断即可后面没有的都是1,例如11标识发送带附件所有情况都发送10标识发送不带附件所有情况都发送100标识发送不带附件成功不发送其他情况发送')),
('emailList', models.CharField(db_column='emailList', default='', max_length=2000, verbose_name='发送邮件列表,除却执行人execBy以外的其他收件人')),
('isCodeRate', models.IntegerField(db_column='isCodeRate', default=0, verbose_name='是否生成代码覆盖率 1生成 0不生成')),
('isSaveHistory', models.IntegerField(db_column='isSaveHistory', default=0, verbose_name='是否保存到历史记录 1保存 0不保存')),
('execComments', models.CharField(db_column='execComments', max_length=400, verbose_name='执行备注信息')),
('retryCount', models.IntegerField(db_column='retryCount', default=0, verbose_name='重试次数,默认0,不重试')),
('execType', models.IntegerField(blank=True, db_column='execType', default=1, verbose_name='执行类型,1立即执行 2定时执行 3周期执行')),
('execTime', models.DateTimeField(db_column='execTime', default='2000-01-01 00:00:01', verbose_name='执行开始时间,默认当前时间')),
('execFinishTime', models.DateTimeField(db_column='execFinishTime', default='2000-01-01 00:00:01', verbose_name='执行结束时间')),
('execTakeTime', models.IntegerField(db_column='execTakeTime', default=0, verbose_name='执行耗时')),
('execStatus', models.IntegerField(db_column='execStatus', default=1, verbose_name='执行状态: NOTRUN = 1 RUNNING = 2 DONE = 3 EXCEPTION = 4 CANCELING = 10 CANCELED = 11')),
('execProgressData', models.CharField(db_column='execProgressData', default='0:0:0:0:0', max_length=30, verbose_name='执行进度数据,格式:ALL:PASS:FAIL:ERROR:NOTRUN,例如任务有10个用例,10:3:1:0:6,代表总共10个,通过3个,失败1个,错误0个,未执行6个。')),
('execPlatform', models.IntegerField(db_column='execPlatform', default=1, verbose_name='调用接口的平台,1代表测试平台,2代表jenkins,100代表其他')),
('execLevel', models.IntegerField(db_column='execLevel', default=5, verbose_name='优先级 5默认 数字越小优先级越高 范围1-10')),
('testResult', models.CharField(db_column='testResult', default='NOTRUN', max_length=20, verbose_name='测试结果 根据断言结果生成的测试结果 PASS/FAIL/ERROR/EXCEPTION/CANCEL')),
('testResultMsg', models.TextField(db_column='testResultMsg', verbose_name='任务执行的统计信息,详细统计,json字符串形式保存。')),
('testReportUrl', models.CharField(db_column='testReportUrl', max_length=200, verbose_name='测试报告链接')),
('taskSuiteExecuteId', models.IntegerField(db_column='taskSuiteExecuteId', default='0', verbose_name='任务集执行Id')),
('version', models.CharField(db_column='version', default='CurrentVersion', max_length=25, verbose_name='执行的版本')),
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboTaskExecuteAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('execBy', models.ForeignKey(blank=True, db_column='execBy', default='', max_length=30, on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboTaskExecuteExecBy', to='all_models.TbUser', to_field='loginName', verbose_name='执行人登录用户名')),
('httpConfKey', models.ForeignKey(db_column='httpConfKey', max_length=20, on_delete=django.db.models.deletion.CASCADE, to='all_models.TbConfigHttp', to_field='httpConfKey', verbose_name='执行环境的httpConfKey')),
],
options={
'verbose_name': '912DUBBO任务执行',
'verbose_name_plural': '912DUBBO任务执行',
'db_table': 'tb2_dubbo_task_execute',
},
),
migrations.CreateModel(
name='Tb2DUBBOTaskSuite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('taskSuiteId', models.CharField(db_column='taskSuiteId', max_length=25, unique=True, verbose_name='任务ID')),
('title', models.CharField(db_column='title', max_length=100, verbose_name='任务集标题')),
('taskSuiteDesc', models.CharField(db_column='taskSuiteDesc', max_length=1000, verbose_name='任务集描述')),
('protocol', models.CharField(db_column='protocol', max_length=20, verbose_name='任务集协议')),
('emailList', models.CharField(db_column='emailList', default='', max_length=2000, verbose_name='发送邮件列表,除却执行人execBy以外的其他收件人')),
('status', models.IntegerField(db_column='status', default=2, verbose_name='状态,1新建待审核 2审核通过 3审核未通过')),
('taskCount', models.IntegerField(db_column='taskCount', verbose_name='任务集中的任务列表')),
('taskList', models.TextField(db_column='taskList', verbose_name='任务集中的任务列表')),
('isCI', models.IntegerField(db_column='isCI', default=0, verbose_name='是否加入到持续集成 0 不加人 1加入')),
('state', models.IntegerField(db_column='state', default=1, verbose_name='状态 0删除 1有效')),
('addBy', models.CharField(db_column='addBy', max_length=25, null=True, verbose_name='创建者登录名')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
],
options={
'verbose_name': '任务集',
'db_table': 'tb2_dubbo_task_suite',
},
),
migrations.CreateModel(
name='Tb2DUBBOTaskSuiteExecute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('taskSuiteId', models.CharField(db_column='taskSuiteId', max_length=25, verbose_name='任务ID')),
('title', models.CharField(db_column='title', max_length=100, verbose_name='任务标题')),
('taskSuiteDesc', models.CharField(db_column='taskSuiteDesc', max_length=1000, verbose_name='任务描述')),
('protocol', models.CharField(db_column='protocol', max_length=20, verbose_name='任务协议')),
('status', models.IntegerField(default=2, verbose_name='状态,1新建待审核 2审核通过 3审核未通过')),
('taskCount', models.IntegerField(db_column='taskCount', verbose_name='任务集中的任务列表')),
('taskList', models.CharField(db_column='taskList', max_length=300, verbose_name='任务集中的任务列表')),
('isCI', models.IntegerField(db_column='isCI', default=0, verbose_name='是否加入到持续集成 0 不加人 1加入')),
('httpConfKeyList', models.CharField(db_column='httpConfKeyList', max_length=300, verbose_name='任务集包含的执行环境')),
('httpConfKeyAliasList', models.CharField(db_column='httpConfKeyAliasList', max_length=300, verbose_name='任务集包含的执行环境名称')),
('caseLevel', models.IntegerField(db_column='caseLevel', default=100, verbose_name='执行时选择的执行优先级,如果选择了,那么只有同等优先级的case会执行,0高 5中 9低')),
('isSendEmail', models.IntegerField(db_column='isSendEmail', default=0, verbose_name='是否发送邮件[是否发送:是否带附件:PASS是否发送:FAIL是否发送:ERROR是否发送:EXCEPTION是否发送]0的时候不发送,1开头的时候依次往后判断即可后面没有的都是1,例如11标识发送带附件所有情况都发送10标识发送不带附件所有情况都发送100标识发送不带附件成功不发送其他情况发送')),
('emailList', models.CharField(db_column='emailList', default='', max_length=2000, verbose_name='发送邮件列表,除却执行人execBy以外的其他收件人')),
('isCodeRate', models.IntegerField(db_column='isCodeRate', default=0, verbose_name='是否生成代码覆盖率 1生成 0不生成')),
('isSaveHistory', models.IntegerField(db_column='isSaveHistory', default=0, verbose_name='是否保存到历史记录 1保存 0不保存')),
('execComments', models.CharField(db_column='execComments', max_length=400, verbose_name='执行备注信息')),
('retryCount', models.IntegerField(db_column='retryCount', default=0, verbose_name='重试次数,默认0,不重试')),
('execType', models.IntegerField(blank=True, db_column='execType', default=1, verbose_name='执行类型,1立即执行 2定时执行 3周期执行')),
('execTime', models.DateTimeField(db_column='execTime', default='2000-01-01 00:00:01', verbose_name='执行开始时间,默认当前时间')),
('execFinishTime', models.DateTimeField(db_column='execFinishTime', default='2000-01-01 00:00:01', verbose_name='执行结束时间')),
('execTakeTime', models.IntegerField(db_column='execTakeTime', default=0, verbose_name='执行耗时')),
('execBy', models.CharField(db_column='execBy', default='', max_length=30, verbose_name='执行人登录用户名')),
('execStatus', models.IntegerField(db_column='execStatus', default=1, verbose_name='执行状态: NOTRUN = 1 RUNNING = 2 DONE = 3 EXCEPTION = 4 CANCELING = 10 CANCELED = 11')),
('execProgressData', models.CharField(db_column='execProgressData', default='0:0:0:0:0', max_length=30, verbose_name='执行进度数据,格式:ALL:PASS:FAIL:ERROR:NOTRUN,例如任务有10个用例,10:3:1:0:6,代表总共10个,通过3个,失败1个,错误0个,未执行6个。 ')),
('execPlatform', models.IntegerField(db_column='execPlatform', default=1, verbose_name='调用接口的平台,1代表测试平台,2代表jenkins,100代表其他')),
('execLevel', models.IntegerField(db_column='execLevel', default=5, verbose_name='优先级 5默认 数字越小优先级越高 范围1-10')),
('testResult', models.CharField(db_column='testResult', default='NOTRUN', max_length=20, verbose_name='测试结果 根据断言结果生成的测试结果 PASS/FAIL/ERROR/EXCEPTION/CANCEL')),
('testResultMsg', models.TextField(db_column='testResultMsg', verbose_name='任务执行的统计信息,详细统计,json字符串形式保存。')),
('testReportUrl', models.CharField(db_column='testReportUrl', max_length=200, verbose_name='测试报告链接')),
('taskExecuteIdList', models.CharField(db_column='taskExecuteIdList', default='', max_length=200, verbose_name='本次任务执行包含的任务执行Id')),
('version', models.CharField(db_column='version', default='CurrentVersion', max_length=25, verbose_name='执行的版本')),
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('addBy', models.CharField(db_column='addBy', max_length=25, verbose_name='创建者登录名')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
],
options={
'db_table': 'tb2_dubbo_task_suite_execute',
},
),
migrations.CreateModel(
name='Tb2DubboTestcase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('caseId', models.CharField(db_column='caseId', max_length=25, unique=True, verbose_name='caseId,可以理解为用例ID,格式HTTP_TESTCASE_1 - 99999999递增')),
('title', models.CharField(max_length=100, verbose_name='用例标题')),
('casedesc', models.TextField(default='', verbose_name='描述')),
('caselevel', models.IntegerField(default=5, verbose_name='用例优先级,数字越小,优先级越高,从0-9。 0高 5中 9低')),
('stepCount', models.IntegerField(db_column='stepCount', verbose_name='包含步骤数量')),
('status', models.IntegerField(default=2, verbose_name='用例状态,1新建待审核 2审核通过 3审核未通过')),
('caseType', models.IntegerField(default=2, verbose_name='用例类型,0测试用例,不计入统计,不进入任务,1 接口计入统计 2接口步骤均计入统计 3步骤计入统计')),
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboTestcaseAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('businessLineId', models.ForeignKey(db_column='businessLineId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbBusinessLine', verbose_name='业务线ID')),
('moduleId', models.ForeignKey(db_column='moduleId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbModules', verbose_name='模块ID')),
],
options={
'db_table': 'tb2_dubbo_testcase',
},
),
# Creates table `tb2_dubbo_testcase_debug`: a per-debug-run copy of a Dubbo
# test case plus its execution results.  It mirrors the descriptive fields of
# Tb2DubboTestcase and adds result/timing columns.  NOTE(review): unlike
# Tb2DubboTestcase.caseId (declared unique=True earlier in this migration),
# caseId here is NOT unique, so multiple debug rows can exist per case —
# presumably one per debug session; confirm against the application code.
migrations.CreateModel(
name='Tb2DubboTestcaseDebug',
fields=[
# Surrogate auto-increment primary key.
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
# Business identifier of the case being debugged (format documented in
# the Chinese verbose_name: HTTP_TESTCASE_<n>, n incrementing to 99999999).
('caseId', models.CharField(db_column='caseId', max_length=25, verbose_name='caseId,可以理解为用例ID,格式HTTP_TESTCASE_1 - 99999999递增')),
# Descriptive fields copied from the test-case definition.
('title', models.CharField(max_length=100, verbose_name='用例标题')),
('casedesc', models.TextField(default='', verbose_name='描述')),
('caselevel', models.IntegerField(default=5, verbose_name='用例优先级,数字越小,优先级越高,从0-9。 0高 5中 9低')),
('stepCount', models.IntegerField(db_column='stepCount', verbose_name='包含步骤数量')),
('status', models.IntegerField(default=2, verbose_name='用例状态,1新建待审核 2审核通过 3审核未通过')),
('caseType', models.IntegerField(default=2, verbose_name='用例类型,0测试用例,不计入统计,不进入任务,1 接口计入统计 2接口步骤均计入统计 3步骤计入统计')),
# Execution-result columns: status enum (per verbose_name: 1 NOTRUN,
# 2 RUNNING, 3 DONE, 4 EXCEPTION), assertion output, textual result,
# and timing breakdown (before / after / execute / total), all in the
# same units — presumably milliseconds; TODO confirm with the executor.
('execStatus', models.IntegerField(db_column='execStatus', default=1, verbose_name='执行状态: NOTRUN = 1 RUNNING = 2 DONE = 3 EXCEPTION = 4')),
('assertResult', models.TextField(blank=True, db_column='assertResult', default='', verbose_name='断言结果')),
('testResult', models.CharField(db_column='testResult', default='NOTRUN', max_length=20, verbose_name='执行结果')),
('beforeExecuteTakeTime', models.IntegerField(db_column='beforeExecuteTakeTime', default=0, verbose_name='执行前耗时')),
('afterExecuteTakeTime', models.IntegerField(db_column='afterExecuteTakeTime', default=0, verbose_name='执行后耗时')),
('executeTakeTime', models.IntegerField(db_column='executeTakeTime', default=0, verbose_name='执行耗时')),
('totalTakeTime', models.IntegerField(db_column='totalTakeTime', default=0, verbose_name='总耗时')),
('version', models.CharField(db_column='version', default='CurrentVersion', max_length=25, verbose_name='执行的版本')),
# Soft-delete flag (0 deleted, 1 active) and audit columns.
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
# Cross-app foreign keys into `all_models`; addBy targets the non-PK
# column TbUser.loginName via to_field.
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboTestcaseDebugAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('businessLineId', models.ForeignKey(db_column='businessLineId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbBusinessLine', verbose_name='业务线ID')),
# NOTE(review): max_length on a ForeignKey has no effect in Django —
# it is ignored; harmless, but likely a copy-paste from a CharField.
('httpConfKey', models.ForeignKey(db_column='httpConfKey', max_length=20, on_delete=django.db.models.deletion.CASCADE, to='all_models.TbConfigHttp', to_field='httpConfKey', verbose_name='执行环境的httpConfKey')),
('moduleId', models.ForeignKey(db_column='moduleId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbModules', verbose_name='模块ID')),
],
options={
'verbose_name': 'DUBBO用例调试',
'verbose_name_plural': '09DUBBO用例调试',
'db_table': 'tb2_dubbo_testcase_debug',
},
),
# Creates table `tb2_dubbo_testcase_step`: one row per step of a Dubbo test
# case.  Steps carry the dubbo invocation target (system/service/method),
# the invoke parameters, pre/post variable blocks, and audit columns.
# (caseId, stepNum) is made unique together by a later AlterUniqueTogether
# operation in this migration.
migrations.CreateModel(
name='Tb2DubboTestcaseStep',
fields=[
# Surrogate auto-increment primary key.
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
# 1-based step ordinal within its parent case (per verbose_name).
('stepNum', models.IntegerField(db_column='stepNum', verbose_name='步骤编号,每个caseID中的有效编号是从1递增')),
('title', models.CharField(max_length=100, verbose_name='步骤标题,默认 步骤1,步骤2 等等')),
('stepDesc', models.TextField(default='', verbose_name='描述')),
('caseType', models.IntegerField(default=2, verbose_name='用例类型,0测试用例,不计入统计,不进入任务,1 接口计入统计 2接口步骤均计入统计 3步骤计入统计')),
# Plain CharField (not a FK) holding the id of the interface this step
# was created from, plus a sync flag (0 no-sync, 1 sync).
('fromInterfaceId', models.CharField(db_column='fromInterfaceId', default='', max_length=30, verbose_name='步骤引用的接口Id')),
('isSync', models.IntegerField(choices=[(0, '不同步'), (1, '同步')], default=0, verbose_name='是否同步')),
# Variables evaluated before the invoke.
('varsPre', models.TextField(db_column='varsPre', default='', verbose_name='前置变量')),
# Dubbo invocation target: project name, fully-qualified service
# interface, and method name.
('dubboSystem', models.CharField(db_column='dubboSystem', max_length=100, verbose_name='dubbo的project名称,比如mls-biz-support')),
('dubboService', models.CharField(db_column='dubboService', max_length=200, verbose_name='dubbo的service全路径,比如com.lianjia.mls.business.quality.facade.SharePoolHouseFacade')),
('dubboMethod', models.CharField(db_column='dubboMethod', max_length=100, verbose_name='dubbo的service中的具体method')),
# Comma-separated invoke parameters, connection encoding, and a
# timeout in seconds (default 20, per verbose_name).
('dubboParams', models.TextField(verbose_name='Dubbo invoke时请求的参数,多个params中间用半角逗号间隔')),
('encoding', models.CharField(db_column='encoding', default='gb18030', max_length=10, verbose_name='dubbo的service中的编码方式')),
('timeout', models.IntegerField(default=20, verbose_name='超时时间,单位秒')),
# Variables extracted after the invoke.  NOTE(review): unlike varsPre
# this has no default='' — inconsistent, but kept as generated.
('varsPost', models.TextField(db_column='varsPost', verbose_name='后置变量')),
# Soft-delete flag (0 deleted, 1 active) and audit columns.
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboTestcaseStepAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('businessLineId', models.ForeignKey(db_column='businessLineId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbBusinessLine', verbose_name='业务线ID')),
# FK to the parent case via the non-PK caseId column (to_field).
# NOTE(review): max_length is ignored on a ForeignKey in Django.
('caseId', models.ForeignKey(db_column='caseId', max_length=25, on_delete=django.db.models.deletion.CASCADE, to='all_models_for_dubbo.Tb2DubboTestcase', to_field='caseId', verbose_name='Tb2DubboTestcase表中的caseID')),
('moduleId', models.ForeignKey(db_column='moduleId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbModules', verbose_name='模块ID')),
],
options={
'db_table': 'tb2_dubbo_testcase_step',
},
),
# Creates table `tb2_dubbo_testcase_step_debug`: a per-debug-run copy of a
# single test-case step together with its execution results.  It duplicates
# the step-definition fields of Tb2DubboTestcaseStep and appends the same
# result/timing columns used by Tb2DubboTestcaseDebug.  caseId here is a
# plain CharField snapshot, not a FK, so the debug row survives even if the
# source case is removed — presumably intentional; confirm with callers.
migrations.CreateModel(
name='Tb2DubboTestcaseStepDebug',
fields=[
# Surrogate auto-increment primary key.
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
# Snapshot of the parent case id and step ordinal (no FK, no unique
# constraint — multiple debug rows per step are allowed).
('caseId', models.CharField(db_column='caseId', max_length=25, verbose_name='caseId,可以理解为用例ID,格式HTTP_TESTCASE_1 - 99999999递增')),
('stepNum', models.IntegerField(db_column='stepNum', verbose_name='步骤编号,每个caseID中的有效编号是从1递增')),
# Step-definition fields copied from Tb2DubboTestcaseStep.
('title', models.CharField(max_length=100, verbose_name='步骤标题,默认 步骤1,步骤2 等等')),
('stepDesc', models.TextField(default='', verbose_name='描述')),
('caseType', models.IntegerField(default=2, verbose_name='用例类型,0测试用例,不计入统计,不进入任务,1 接口计入统计 2接口步骤均计入统计 3步骤计入统计')),
('fromInterfaceId', models.CharField(db_column='fromInterfaceId', default='', max_length=30, verbose_name='步骤引用的接口Id')),
('isSync', models.IntegerField(choices=[(0, '不同步'), (1, '同步')], default=0, verbose_name='是否同步')),
('varsPre', models.TextField(db_column='varsPre', default='', verbose_name='前置变量')),
('dubboSystem', models.CharField(db_column='dubboSystem', max_length=100, verbose_name='dubbo的project名称,比如mls-biz-support')),
('dubboService', models.CharField(db_column='dubboService', max_length=200, verbose_name='dubbo的service全路径,比如com.lianjia.mls.business.quality.facade.SharePoolHouseFacade')),
('dubboMethod', models.CharField(db_column='dubboMethod', max_length=100, verbose_name='dubbo的service中的具体method')),
('dubboParams', models.TextField(verbose_name='Dubbo invoke时请求的参数,多个params中间用半角逗号间隔')),
('encoding', models.CharField(db_column='encoding', default='gb18030', max_length=10, verbose_name='dubbo的service中的编码方式')),
('timeout', models.IntegerField(default=20, verbose_name='超时时间,单位秒')),
('varsPost', models.TextField(db_column='varsPost', verbose_name='后置变量')),
# Execution-result columns: status, raw response, assertion output,
# textual result, and timing breakdown — same layout as
# Tb2DubboTestcaseDebug, plus actualResult for the step response.
('execStatus', models.IntegerField(db_column='execStatus', default=1, verbose_name='执行状态')),
('actualResult', models.TextField(blank=True, db_column='actualResult', default='', verbose_name='实际结果')),
('assertResult', models.TextField(blank=True, db_column='assertResult', default='', verbose_name='断言结果')),
('testResult', models.CharField(db_column='testResult', default='NOTRUN', max_length=20, verbose_name='执行结果')),
('beforeExecuteTakeTime', models.IntegerField(db_column='beforeExecuteTakeTime', default=0, verbose_name='执行前耗时')),
('afterExecuteTakeTime', models.IntegerField(db_column='afterExecuteTakeTime', default=0, verbose_name='执行后耗时')),
('executeTakeTime', models.IntegerField(db_column='executeTakeTime', default=0, verbose_name='执行耗时')),
('totalTakeTime', models.IntegerField(db_column='totalTakeTime', default=0, verbose_name='总耗时')),
('version', models.CharField(db_column='version', default='CurrentVersion', max_length=25, verbose_name='执行的版本')),
# Soft-delete flag (0 deleted, 1 active) and audit columns.
('state', models.IntegerField(default=1, verbose_name='状态 0删除 1有效')),
('modBy', models.CharField(db_column='modBy', max_length=25, null=True, verbose_name='修改者登录名')),
('addTime', models.DateTimeField(auto_now_add=True, db_column='addTime', verbose_name='创建时间')),
('modTime', models.DateTimeField(auto_now=True, db_column='modTime', verbose_name='修改时间')),
# Cross-app foreign keys; addBy/httpConfKey target non-PK columns via
# to_field.  NOTE(review): max_length on a ForeignKey is ignored.
('addBy', models.ForeignKey(db_column='addBy', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboTestcaseStepDebugAddBy', to='all_models.TbUser', to_field='loginName', verbose_name='创建者登录名')),
('businessLineId', models.ForeignKey(db_column='businessLineId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbBusinessLine', verbose_name='业务线ID')),
('httpConfKey', models.ForeignKey(db_column='httpConfKey', max_length=20, on_delete=django.db.models.deletion.CASCADE, to='all_models.TbConfigHttp', to_field='httpConfKey', verbose_name='执行环境的httpConfKey')),
('moduleId', models.ForeignKey(db_column='moduleId', on_delete=django.db.models.deletion.CASCADE, to='all_models.TbModules', verbose_name='模块ID')),
],
options={
'verbose_name': '用例步骤调试',
'verbose_name_plural': '10用例步骤调试',
'db_table': 'tb2_dubbo_testcase_step_debug',
},
),
# Adds the taskExecuteId FK to Tb2DubboInterfaceExecuteHistory after both
# models exist — a deferred AddField, presumably because the two CreateModel
# operations form a dependency cycle or ordering constraint within this
# migration; the FK links each history row to the task-execution record it
# belongs to (CASCADE on delete).
migrations.AddField(
model_name='tb2dubbointerfaceexecutehistory',
name='taskExecuteId',
field=models.ForeignKey(db_column='taskExecuteId', on_delete=django.db.models.deletion.CASCADE, related_name='Tb2DubboInterfaceExecuteHistoryTaskExecuteId', to='all_models_for_dubbo.Tb2DubboTaskExecute', verbose_name='任务执行表的主键ID,关联哪次执行的任务'),
),
# Enforces at the DB level that a step ordinal appears at most once per case:
# UNIQUE(caseId, stepNum) on tb2_dubbo_testcase_step.
migrations.AlterUniqueTogether(
name='tb2dubbotestcasestep',
unique_together=set([('caseId', 'stepNum')]),
),
# Enforces one history row per (interfaceUrl, taskExecuteId) pair, i.e. an
# interface URL is recorded at most once per task execution.
migrations.AlterUniqueTogether(
name='tb2dubbointerfaceexecutehistory',
unique_together=set([('interfaceUrl', 'taskExecuteId')]),
),
]
| 95.470103
| 275
| 0.657992
| 4,837
| 46,303
| 6.098201
| 0.092413
| 0.127538
| 0.053599
| 0.072516
| 0.883954
| 0.870529
| 0.859545
| 0.856053
| 0.856053
| 0.850188
| 0
| 0.023682
| 0.188368
| 46,303
| 484
| 276
| 95.667355
| 0.761209
| 0.001469
| 0
| 0.718487
| 1
| 0.004202
| 0.286425
| 0.08094
| 0
| 0
| 0
| 0
| 0.006303
| 1
| 0
| false
| 0.018908
| 0.006303
| 0
| 0.014706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.