hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
48dc631b1c4a2cb88ce03ced0e0bc40234df9515
| 66,039
|
py
|
Python
|
nidaqmx/stream_writers.py
|
TheWiselyBearded/nidaqmx-python
|
4aff91f8302d6e75a954ceed88d55cf1836c2d04
|
[
"MIT"
] | 1
|
2022-03-14T19:46:36.000Z
|
2022-03-14T19:46:36.000Z
|
nidaqmx/stream_writers.py
|
TheWiselyBearded/nidaqmx-python
|
4aff91f8302d6e75a954ceed88d55cf1836c2d04
|
[
"MIT"
] | null | null | null |
nidaqmx/stream_writers.py
|
TheWiselyBearded/nidaqmx-python
|
4aff91f8302d6e75a954ceed88d55cf1836c2d04
|
[
"MIT"
] | 2
|
2022-03-14T19:46:51.000Z
|
2022-03-14T20:16:57.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy
from nidaqmx import DaqError
from nidaqmx._task_modules.write_functions import (
_write_analog_f_64, _write_analog_scalar_f_64, _write_binary_i_16,
_write_binary_i_32, _write_binary_u_16, _write_binary_u_32,
_write_ctr_freq, _write_ctr_ticks, _write_ctr_time, _write_ctr_freq_scalar,
_write_ctr_ticks_scalar, _write_ctr_time_scalar, _write_digital_u_8,
_write_digital_u_16, _write_digital_u_32, _write_digital_lines,
_write_digital_scalar_u_32)
from nidaqmx.error_codes import DAQmxErrors
__all__ = ['AnalogSingleChannelWriter', 'AnalogMultiChannelWriter',
           'AnalogUnscaledWriter', 'CounterWriter',
           'DigitalSingleChannelWriter', 'DigitalMultiChannelWriter']


class UnsetAutoStartSentinel(object):
    """Private sentinel type used to detect an unset ``auto_start``."""
    pass


# Module-level singleton. Write methods compare ``self._auto_start`` against
# this object by identity to tell whether the caller explicitly chose an
# auto_start value or the method should fall back to its own default.
AUTO_START_UNSET = UnsetAutoStartSentinel()

# Only the singleton is needed from here on; drop the class from the module
# namespace so no second instance can be created.
del UnsetAutoStartSentinel
class ChannelWriterBase(object):
    """
    Defines base class for all NI-DAQmx stream writers.
    """

    def __init__(self, task_out_stream, auto_start=AUTO_START_UNSET):
        """
        Args:
            task_out_stream: Specifies the output stream associated with
                an NI-DAQmx task which to write samples.
            auto_start (Optional[bool]): Specifies if the write method
                automatically starts the task if you did not explicitly
                start it with the DAQmx Start Task method.

                If you do not specify a value for this parameter,
                NI-DAQmx determines its value based on the type of write
                method used. If you use a one sample write method, the
                value is True; conversely, if you use a many sample
                write method, the value is False.
        """
        self._out_stream = task_out_stream
        self._task = task_out_stream._task
        self._handle = task_out_stream._task._handle

        # Shape verification is enabled by default; it can be disabled
        # through the "verify_array_shape" property for performance.
        self._verify_array_shape = True
        self._auto_start = auto_start

    @property
    def auto_start(self):
        """
        bool: Specifies if the write method automatically starts the
            task if you did not explicitly start it with the DAQmx Start
            Task method.

            If you do not specify a value for this parameter, NI-DAQmx
            determines its value based on the type of write method used.
            If you use a one sample write method, its value is True;
            conversely, if you use a many sample write method, its value
            is False.
        """
        return self._auto_start

    @auto_start.setter
    def auto_start(self, val):
        self._auto_start = val

    @auto_start.deleter
    def auto_start(self):
        # Restore the sentinel so each write method falls back to its
        # own default (True for one-sample, False for many-sample).
        self._auto_start = AUTO_START_UNSET

    @property
    def verify_array_shape(self):
        """
        bool: Indicates whether the size and shape of the user-defined
            NumPy arrays passed to write methods are verified. Defaults
            to True when this object is instantiated.

            Setting this property to True may marginally adversely
            impact the performance of write methods.
        """
        return self._verify_array_shape

    @verify_array_shape.setter
    def verify_array_shape(self, val):
        self._verify_array_shape = val

    def _verify_array(self, data, is_many_chan, is_many_samp):
        """
        Verifies that the shape of the specified NumPy array can be used
        with the specified write method type, if the
        "verify_array_shape" property is set to True.

        Args:
            data (numpy.ndarray): Specifies the NumPy array to verify.
            is_many_chan (bool): Specifies if the write method is a many
                channel version.
            is_many_samp (bool): Specifies if the write method is a many
                sample version.
        """
        if not self._verify_array_shape:
            return

        channels_to_write = self._task.channels
        number_of_channels = len(channels_to_write.channel_names)

        expected_num_dimensions = None
        if is_many_chan:
            if is_many_samp:
                # Many channels x many samples: one row per channel.
                expected_num_dimensions = 2
            else:
                expected_num_dimensions = 1
                if data.shape[0] != number_of_channels:
                    self._task._raise_invalid_write_num_chans_error(
                        number_of_channels, data.shape[0])
        else:
            if is_many_samp:
                expected_num_dimensions = 1

        if expected_num_dimensions is not None:
            self._raise_error_if_invalid_write_dimensions(
                expected_num_dimensions, len(data.shape))

    def _verify_array_digital_lines(
            self, data, is_many_chan, is_many_line):
        """
        Verifies that the shape of the specified NumPy array can be used
        to write samples to the current task which contains one or more
        channels that have one or more digital lines per channel, if the
        "verify_array_shape" property is set to True.

        Args:
            data (numpy.ndarray): Specifies the NumPy array to verify.
            is_many_chan (bool): Specifies if the write method is a
                many channel version.
            is_many_line (bool): Specifies if the write method is a
                many line version.
        """
        if not self._verify_array_shape:
            return

        channels_to_write = self._task.channels
        number_of_channels = len(channels_to_write.channel_names)
        number_of_lines = self._out_stream.do_num_booleans_per_chan

        expected_num_dimensions = None
        if is_many_chan:
            if data.shape[0] != number_of_channels:
                self._task._raise_invalid_write_num_chans_error(
                    number_of_channels, data.shape[0])
            if is_many_line:
                # One row per channel, one column per line.
                expected_num_dimensions = 2
                if data.shape[1] != number_of_lines:
                    self._task._raise_invalid_num_lines_error(
                        number_of_lines, data.shape[1])
            else:
                expected_num_dimensions = 1
        else:
            if is_many_line:
                expected_num_dimensions = 1
                if data.shape[0] != number_of_lines:
                    self._task._raise_invalid_num_lines_error(
                        number_of_lines, data.shape[0])

        if expected_num_dimensions is not None:
            self._raise_error_if_invalid_write_dimensions(
                expected_num_dimensions, len(data.shape))

    def _raise_error_if_invalid_write_dimensions(
            self, num_dimensions_expected, num_dimensions_in_data):
        """Raise a DaqError if the array dimensionality is wrong."""
        if num_dimensions_expected != num_dimensions_in_data:
            raise DaqError(
                'Write cannot be performed because the NumPy array passed '
                'into this function is not shaped correctly. '
                'You must pass in a NumPy array of the correct number of '
                'dimensions based on the write method you use.\n\n'
                'No. of dimensions of NumPy Array provided: {0}\n'
                'No. of dimensions of NumPy Array required: {1}'
                .format(num_dimensions_in_data, num_dimensions_expected),
                DAQmxErrors.UNKNOWN.value, task_name=self._task.name)
class AnalogSingleChannelWriter(ChannelWriterBase):
    """
    Writes samples to an analog output channel in an NI-DAQmx task.
    """

    def write_many_sample(self, data, timeout=10.0):
        """
        Writes one or more floating-point samples to a single analog
        output channel in a task.

        If the task uses on-demand timing, this method returns only
        after the device generates all samples. On-demand is the default
        timing type if you do not use the timing property on the task to
        configure a sample timing type. If the task uses any timing type
        other than on-demand, this method returns immediately and does
        not wait for the device to generate all samples. Your
        application must determine if the task is done to ensure that
        the device generated all samples.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of
                floating-point samples to write to the task. Each
                element of the array corresponds to a sample to write.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote.
        """
        self._verify_array(data, False, True)

        # Many-sample writes default to not auto-starting the task.
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else False)

        return _write_analog_f_64(
            self._handle, data, data.shape[0], auto_start, timeout)

    def write_one_sample(self, data, timeout=10):
        """
        Writes a single floating-point sample to a single analog output
        channel in a task.

        Args:
            data (float): Specifies the floating-point sample to write
                to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. This method returns an
                error if the time elapses. The default timeout is 10
                seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to write the submitted samples. If the method could
                not write all the submitted samples, it returns an error
                and the number of samples successfully written.
        """
        # One-sample writes default to auto-starting the task; use the
        # "auto_start" property to override this behavior.
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else True)

        return _write_analog_scalar_f_64(
            self._handle, data, auto_start, timeout)
class AnalogMultiChannelWriter(ChannelWriterBase):
    """
    Writes samples to one or more analog output channels in an NI-DAQmx
    task.
    """

    def write_many_sample(self, data, timeout=10.0):
        """
        Writes one or more floating-point samples to one or more analog
        output channels in a task.

        With on-demand timing (the default when no sample timing type is
        configured through the task's timing property), this method
        returns only after the device generates every sample. With any
        other timing type it returns immediately without waiting; the
        application must check whether the task is done to ensure all
        samples were generated.

        Args:
            data (numpy.ndarray): 2D NumPy array of floating-point
                samples to write. Each row corresponds to a channel in
                the task (in the order the channels were added); each
                column corresponds to a sample to write to each channel.
            timeout (Optional[float]): Time in seconds to wait for the
                method to write all samples; the timeout check applies
                only if the method must wait before writing. The default
                is 10 seconds. nidaqmx.constants.WAIT_INFINITELY waits
                indefinitely; 0 tries once and, on partial success,
                returns an error along with the count written.
        Returns:
            int:
                The number of samples actually written to each channel
                in the task.
        """
        self._verify_array(data, True, True)

        # Resolve the auto_start default: many-sample writes do not
        # start the task unless the caller set auto_start explicitly.
        if self._auto_start is AUTO_START_UNSET:
            start_task = False
        else:
            start_task = self._auto_start

        samples_per_channel = data.shape[1]
        return _write_analog_f_64(
            self._handle, data, samples_per_channel, start_task, timeout)

    def write_one_sample(self, data, timeout=10):
        """
        Writes a single floating-point sample to one or more analog
        output channels in a task.

        Args:
            data (numpy.ndarray): 1D NumPy array of floating-point
                samples to write. Each element corresponds to a channel
                in the task, in the order the channels were added.
            timeout (Optional[float]): Time in seconds to wait for the
                method to write all samples; the timeout check applies
                only if the method must wait before writing. The default
                is 10 seconds. nidaqmx.constants.WAIT_INFINITELY waits
                indefinitely; 0 tries once and, on partial success,
                returns an error along with the count written.
        """
        self._verify_array(data, True, False)

        # One-sample writes start the task by default.
        if self._auto_start is AUTO_START_UNSET:
            start_task = True
        else:
            start_task = self._auto_start

        return _write_analog_f_64(
            self._handle, data, 1, start_task, timeout)
class AnalogUnscaledWriter(ChannelWriterBase):
    """
    Writes unscaled samples to one or more analog output channels in
    an NI-DAQmx task.

    All write methods share the same semantics: if the task uses
    on-demand timing (the default when no sample timing type is
    configured through the task's timing property), the method returns
    only after the device generates all samples; with any other timing
    type it returns immediately, and the application must determine if
    the task is done to ensure the device generated all samples.
    """

    def _write_unscaled(self, write_func, data, timeout):
        """
        Shared implementation for the unscaled many-sample writes.

        Verifies the 2D array shape, resolves the auto_start default
        (False for many-sample writes unless set explicitly), and
        delegates to *write_func*, one of the _write_binary_* wrappers.
        """
        self._verify_array(data, True, True)

        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else False)

        return write_func(
            self._handle, data, data.shape[1], auto_start, timeout)

    def write_int16(self, data, timeout=10.0):
        """
        Writes one or more unscaled 16-bit integer samples to one or
        more analog output channels in a task.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of unscaled
                16-bit integer samples to write to the task.
                Each row corresponds to a channel in the task. Each
                column corresponds to a sample to write to each channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. The default timeout is
                10 seconds. nidaqmx.constants.WAIT_INFINITELY waits
                indefinitely; 0 tries once to write the submitted
                samples and, on partial success, returns an error and
                the number of samples successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote to each channel in the task.
        """
        return self._write_unscaled(_write_binary_i_16, data, timeout)

    def write_int32(self, data, timeout=10.0):
        """
        Writes one or more unscaled 32-bit integer samples to one or
        more analog output channels in a task.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of unscaled
                32-bit integer samples to write to the task.
                Each row corresponds to a channel in the task. Each
                column corresponds to a sample to write to each channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. The default timeout is
                10 seconds. nidaqmx.constants.WAIT_INFINITELY waits
                indefinitely; 0 tries once to write the submitted
                samples and, on partial success, returns an error and
                the number of samples successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote to each channel in the task.
        """
        return self._write_unscaled(_write_binary_i_32, data, timeout)

    def write_uint16(self, data, timeout=10.0):
        """
        Writes one or more unscaled 16-bit unsigned integer samples to
        one or more analog output channels in a task.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of unscaled
                16-bit unsigned integer samples to write to the task.
                Each row corresponds to a channel in the task. Each
                column corresponds to a sample to write to each channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. The default timeout is
                10 seconds. nidaqmx.constants.WAIT_INFINITELY waits
                indefinitely; 0 tries once to write the submitted
                samples and, on partial success, returns an error and
                the number of samples successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote to each channel in the task.
        """
        return self._write_unscaled(_write_binary_u_16, data, timeout)

    def write_uint32(self, data, timeout=10.0):
        """
        Writes one or more unscaled 32-bit unsigned integer samples to
        one or more analog output channels in a task.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of unscaled
                32-bit unsigned integer samples to write to the task.
                Each row corresponds to a channel in the task. Each
                column corresponds to a sample to write to each channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                NI-DAQmx performs a timeout check only if the method
                must wait before it writes data. The default timeout is
                10 seconds. nidaqmx.constants.WAIT_INFINITELY waits
                indefinitely; 0 tries once to write the submitted
                samples and, on partial success, returns an error and
                the number of samples successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote to each channel in the task.
        """
        return self._write_unscaled(_write_binary_u_32, data, timeout)
class CounterWriter(ChannelWriterBase):
"""
Writes samples to a counter output channel in an NI-DAQmx task.
"""
def write_many_sample_pulse_frequency(
self, frequencies, duty_cycles, timeout=10.0):
"""
Writes one or more pulse samples in terms of frequency to a
single counter output channel in a task.
If the task uses on-demand timing, this method returns only
after the device generates all samples. On-demand is the default
timing type if you do not use the timing property on the task to
configure a sample timing type. If the task uses any timing type
other than on-demand, this method returns immediately and does
not wait for the device to generate all samples. Your
application must determine if the task is done to ensure that
the device generated all samples.
Args:
frequencies (numpy.ndarray): Contains a 1D NumPy array of
floating-point values that holds the frequency portion
of the pulse samples to write to the task. Each element
of the array corresponds to a sample to write.
duty_cycles (numpy.ndarray): Contains a 1D NumPy array of
floating-point values that holds the duty cycle portion
of the pulse samples to write to the task. Each element
of the array corresponds to a sample to write.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for the method to write all samples.
NI-DAQmx performs a timeout check only if the method
must wait before it writes data. This method returns an
error if the time elapses. The default timeout is 10
seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to write the submitted samples. If the method could
not write all the submitted samples, it returns an error
and the number of samples successfully written.
Returns:
int:
Specifies the actual number of samples this method
successfully wrote.
"""
self._verify_array(frequencies, False, True)
self._verify_array(duty_cycles, False, True)
auto_start = (self._auto_start if self._auto_start is not
AUTO_START_UNSET else False)
return _write_ctr_freq(
self._handle, frequencies, duty_cycles, frequencies.shape[0],
auto_start, timeout)
def write_many_sample_pulse_ticks(
self, high_ticks, low_ticks, timeout=10.0):
"""
Writes one or more pulse samples in terms of ticks to a single
counter output channel in a task.
If the task uses on-demand timing, this method returns only
after the device generates all samples. On-demand is the default
timing type if you do not use the timing property on the task to
configure a sample timing type. If the task uses any timing type
other than on-demand, this method returns immediately and does
not wait for the device to generate all samples. Your
application must determine if the task is done to ensure that
the device generated all samples.
Args:
high_ticks (numpy.ndarray): Contains a 1D NumPy array of
32-bit unsigned integer values that holds the high ticks
portion of the pulse samples to write to the task. Each
element of the array corresponds to a sample to write.
low_ticks (numpy.ndarray): Contains a 1D NumPy array of
32-bit unsigned integer values that holds the low ticks
portion of the pulse samples to write to the task. Each
element of the array corresponds to a sample to write.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for the method to write all samples.
NI-DAQmx performs a timeout check only if the method
must wait before it writes data. This method returns an
error if the time elapses. The default timeout is 10
seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to write the submitted samples. If the method could
not write all the submitted samples, it returns an error
and the number of samples successfully written.
Returns:
int:
Specifies the actual number of samples this method
successfully wrote.
"""
self._verify_array(high_ticks, False, True)
self._verify_array(low_ticks, False, True)
auto_start = (self._auto_start if self._auto_start is not
AUTO_START_UNSET else False)
return _write_ctr_ticks(
self._handle, high_ticks, low_ticks, high_ticks.shape[0],
auto_start, timeout)
def write_many_sample_pulse_time(
self, high_times, low_times, timeout=10.0):
"""
Writes one or more pulse samples in terms of time to a single
counter output channel in a task.
If the task uses on-demand timing, this method returns only
after the device generates all samples. On-demand is the default
timing type if you do not use the timing property on the task to
configure a sample timing type. If the task uses any timing type
other than on-demand, this method returns immediately and does
not wait for the device to generate all samples. Your
application must determine if the task is done to ensure that
the device generated all samples.
Args:
high_times (numpy.ndarray): Contains a 1D NumPy array of
floating-point values that holds the high time portion
of the pulse samples to write to the task. Each element
of the array corresponds to a sample to write.
low_times (numpy.ndarray): Contains a 1D NumPy array of
floating-point values that holds the low time portion
of the pulse samples to write to the task. Each element
of the array corresponds to a sample to write.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for the method to write all samples.
NI-DAQmx performs a timeout check only if the method
must wait before it writes data. This method returns an
error if the time elapses. The default timeout is 10
seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to write the submitted samples. If the method could
not write all the submitted samples, it returns an error
and the number of samples successfully written.
Returns:
int:
Specifies the actual number of samples this method
successfully wrote.
"""
self._verify_array(high_times, False, True)
self._verify_array(low_times, False, True)
auto_start = (self._auto_start if self._auto_start is not
AUTO_START_UNSET else False)
return _write_ctr_time(
self._handle, high_times, low_times, high_times.shape[0],
auto_start, timeout)
def write_one_sample_pulse_frequency(
self, frequency, duty_cycle, timeout=10):
"""
Writes a new pulse frequency and duty cycle to a single counter
output channel in a task.
Args:
frequency (float): Specifies at what frequency to generate
pulses.
duty_cycle (float): Specifies the width of the pulse divided
by the pulse period. NI-DAQmx uses this ratio combined
with frequency to determine pulse width and the interval
between pulses.
auto_start (Optional[bool]): Specifies if this method
automatically starts the task if you did not explicitly
start it with the DAQmx Start Task method.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for the method to write all samples.
NI-DAQmx performs a timeout check only if the method
must wait before it writes data. This method returns an
error if the time elapses. The default timeout is 10
seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to write the submitted samples. If the method could
not write all the submitted samples, it returns an error
and the number of samples successfully written.
"""
auto_start = (self._auto_start if self._auto_start is not
AUTO_START_UNSET else True)
return _write_ctr_freq_scalar(
self._handle, frequency, duty_cycle, auto_start, timeout)
def write_one_sample_pulse_ticks(
self, high_ticks, low_ticks, timeout=10):
"""
Writes a new pulse high tick count and low tick count to a
single counter output channel in a task.
Args:
high_ticks (float): Specifies the number of ticks the pulse
is high.
low_ticks (float): Specifies the number of ticks the pulse
is low.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for the method to write all samples.
NI-DAQmx performs a timeout check only if the method
must wait before it writes data. This method returns an
error if the time elapses. The default timeout is 10
seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to write the submitted samples. If the method could
not write all the submitted samples, it returns an error
and the number of samples successfully written.
"""
auto_start = (self._auto_start if self._auto_start is not
AUTO_START_UNSET else True)
return _write_ctr_ticks_scalar(
self._handle, high_ticks, low_ticks, auto_start, timeout)
def write_one_sample_pulse_time(
self, high_time, low_time, timeout=10):
"""
Writes a new pulse high time and low time to a single counter
output channel in a task.
Args:
high_time (float): Specifies the amount of time the pulse
is high.
low_time (float): Specifies the amount of time the pulse
is low.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for the method to write all samples.
NI-DAQmx performs a timeout check only if the method
must wait before it writes data. This method returns an
error if the time elapses. The default timeout is 10
seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to write the submitted samples. If the method could
not write all the submitted samples, it returns an error
and the number of samples successfully written.
"""
auto_start = (self._auto_start if self._auto_start is not
AUTO_START_UNSET else True)
return _write_ctr_time_scalar(
self._handle, high_time, low_time, auto_start, timeout)
class DigitalSingleChannelWriter(ChannelWriterBase):
    """
    Writes samples to a single digital output channel in an NI-DAQmx
    task.
    """

    def write_many_sample_port_byte(self, data, timeout=10.0):
        """
        Writes one or more 8-bit unsigned integer samples to a single
        digital output channel in a task.

        Use this method for devices with up to 8 lines per port.

        If the task uses on-demand timing, this method returns only
        after the device generates all samples. On-demand is the default
        timing type if you do not use the timing property on the task to
        configure a sample timing type. If the task uses any timing type
        other than on-demand, this method returns immediately and does
        not wait for the device to generate all samples. Your
        application must determine if the task is done to ensure that
        the device generated all samples.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of 8-bit
                unsigned integer samples to write to the task. Each
                element of the array corresponds to a sample to write.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote.
        """
        self._verify_array(data, False, True)
        # Buffered (multi-sample) writes do not auto-start by default.
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else False)
        return _write_digital_u_8(
            self._handle, data, data.shape[0], auto_start, timeout)

    def write_many_sample_port_uint16(self, data, timeout=10.0):
        """
        Writes one or more 16-bit unsigned integer samples to a single
        digital output channel in a task.

        Use this method for devices with up to 16 lines per port.

        If the task uses on-demand timing, this method returns only
        after the device generates all samples; with any other timing
        type it returns immediately, and your application must determine
        if the task is done to ensure that the device generated all
        samples.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of 16-bit
                unsigned integer samples to write to the task. Each
                element of the array corresponds to a sample to write.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote.
        """
        self._verify_array(data, False, True)
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else False)
        return _write_digital_u_16(
            self._handle, data, data.shape[0], auto_start, timeout)

    def write_many_sample_port_uint32(self, data, timeout=10.0):
        """
        Writes one or more 32-bit unsigned integer samples to a single
        digital output channel in a task.

        Use this method for devices with up to 32 lines per port.

        If the task uses on-demand timing, this method returns only
        after the device generates all samples; with any other timing
        type it returns immediately, and your application must determine
        if the task is done to ensure that the device generated all
        samples.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of 32-bit
                unsigned integer samples to write to the task. Each
                element of the array corresponds to a sample to write.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote.
        """
        self._verify_array(data, False, True)
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else False)
        return _write_digital_u_32(
            self._handle, data, data.shape[0], auto_start, timeout)

    def write_one_sample_multi_line(self, data, timeout=10):
        """
        Writes a single boolean sample to a single digital output
        channel in a task. The channel can contain multiple digital
        lines.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of boolean
                samples to write to the task. Each element of the array
                corresponds to a line in the channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        self._verify_array_digital_lines(data, False, True)
        # Single-sample writes auto-start the task by default.
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else True)
        return _write_digital_lines(
            self._handle, data, 1, auto_start, timeout)

    def write_one_sample_one_line(self, data, timeout=10):
        """
        Writes a single boolean sample to a single digital output
        channel in a task. The channel can contain only one digital
        line.

        Args:
            data (int): Specifies the boolean sample to write to the
                task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else True)
        # FIX: ``numpy.bool`` (an alias of the builtin) was deprecated
        # in NumPy 1.20 and removed in 1.24; the builtin ``bool`` is
        # the correct, equivalent dtype.
        numpy_array = numpy.asarray([data], dtype=bool)
        return _write_digital_lines(
            self._handle, numpy_array, 1, auto_start, timeout)

    def write_one_sample_port_byte(self, data, timeout=10):
        """
        Writes a single 8-bit unsigned integer sample to a single
        digital output channel in a task.

        Use this method for devices with up to 8 lines per port.

        Args:
            data (int): Specifies the 8-bit unsigned integer sample to
                write to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else True)
        numpy_array = numpy.asarray([data], dtype=numpy.uint8)
        return _write_digital_u_8(
            self._handle, numpy_array, 1, auto_start, timeout)

    def write_one_sample_port_uint16(self, data, timeout=10):
        """
        Writes a single 16-bit unsigned integer sample to a single
        digital output channel in a task.

        Use this method for devices with up to 16 lines per port.

        Args:
            data (int): Specifies the 16-bit unsigned integer sample to
                write to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else True)
        numpy_array = numpy.asarray([data], dtype=numpy.uint16)
        return _write_digital_u_16(
            self._handle, numpy_array, 1, auto_start, timeout)

    def write_one_sample_port_uint32(self, data, timeout=10):
        """
        Writes a single 32-bit unsigned integer sample to a single
        digital output channel in a task.

        Use this method for devices with up to 32 lines per port.

        Args:
            data (int): Specifies the 32-bit unsigned integer sample to
                write to the task.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        auto_start = (self._auto_start if self._auto_start is not
                      AUTO_START_UNSET else True)
        # NOTE: unlike the byte/uint16 variants, this path uses the
        # scalar C-API call and needs no intermediate NumPy array.
        return _write_digital_scalar_u_32(
            self._handle, data, auto_start, timeout)
class DigitalMultiChannelWriter(ChannelWriterBase):
    """
    Writes samples to one or more digital output channels in an NI-DAQmx
    task.
    """

    def _resolved_auto_start(self, fallback):
        # Honor an explicitly configured auto_start; otherwise use the
        # per-method default (False for buffered writes, True for
        # single-sample writes).
        if self._auto_start is AUTO_START_UNSET:
            return fallback
        return self._auto_start

    def write_many_sample_port_byte(self, data, timeout=10.0):
        """
        Writes one or more 8-bit unsigned integer samples to one or more
        digital output channels in a task.

        Use this method for devices with up to 8 lines per port.

        If the task uses on-demand timing, this method returns only
        after the device generates all samples; with any other timing
        type it returns immediately, and your application must determine
        if the task is done to ensure that the device generated all
        samples.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of 8-bit
                unsigned integer samples to write to the task. Each row
                corresponds to a channel in the task (in the order the
                channels were added); each column corresponds to a
                sample to write to each channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote to each channel in the task.
        """
        self._verify_array(data, True, True)
        return _write_digital_u_8(
            self._handle, data, data.shape[1],
            self._resolved_auto_start(False), timeout)

    def write_many_sample_port_uint16(self, data, timeout=10.0):
        """
        Writes one or more 16-bit unsigned integer samples to one or
        more digital output channels in a task.

        Use this method for devices with up to 16 lines per port.

        If the task uses on-demand timing, this method returns only
        after the device generates all samples; with any other timing
        type it returns immediately, and your application must determine
        if the task is done to ensure that the device generated all
        samples.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of 16-bit
                unsigned integer samples to write to the task. Each row
                corresponds to a channel in the task (in the order the
                channels were added); each column corresponds to a
                sample to write to each channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote to each channel in the task.
        """
        self._verify_array(data, True, True)
        return _write_digital_u_16(
            self._handle, data, data.shape[1],
            self._resolved_auto_start(False), timeout)

    def write_many_sample_port_uint32(self, data, timeout=10.0):
        """
        Writes one or more 32-bit unsigned integer samples to one or
        more digital output channels in a task.

        Use this method for devices with up to 32 lines per port.

        If the task uses on-demand timing, this method returns only
        after the device generates all samples; with any other timing
        type it returns immediately, and your application must determine
        if the task is done to ensure that the device generated all
        samples.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of 32-bit
                unsigned integer samples to write to the task. Each row
                corresponds to a channel in the task (in the order the
                channels were added); each column corresponds to a
                sample to write to each channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        Returns:
            int:
                Specifies the actual number of samples this method
                successfully wrote to each channel in the task.
        """
        self._verify_array(data, True, True)
        return _write_digital_u_32(
            self._handle, data, data.shape[1],
            self._resolved_auto_start(False), timeout)

    def write_one_sample_multi_line(self, data, timeout=10):
        """
        Writes a single boolean sample to one or more digital output
        channels in a task. The channel can contain multiple digital
        lines.

        Args:
            data (numpy.ndarray): Contains a 2D NumPy array of boolean
                samples to write to the task. Each row corresponds to a
                channel in the task (in the order the channels were
                added); each column corresponds to a line from each
                channel.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        self._verify_array_digital_lines(data, True, True)
        return _write_digital_lines(
            self._handle, data, 1,
            self._resolved_auto_start(True), timeout)

    def write_one_sample_one_line(self, data, timeout=10):
        """
        Writes a single boolean sample to one or more digital output
        channels in a task. The channel can contain only one digital
        line.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of boolean
                samples to write to the task. Each element in the array
                corresponds to a channel in the task (in the order the
                channels were added).
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        self._verify_array_digital_lines(data, True, False)
        return _write_digital_lines(
            self._handle, data, 1,
            self._resolved_auto_start(True), timeout)

    def write_one_sample_port_byte(self, data, timeout=10):
        """
        Writes a single 8-bit unsigned integer sample to one or more
        digital output channels in a task.

        Use this method for devices with up to 8 lines per port.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of 8-bit
                unsigned integer samples to write to the task. Each
                element in the array corresponds to a channel in the
                task (in the order the channels were added).
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        self._verify_array(data, True, False)
        return _write_digital_u_8(
            self._handle, data, 1,
            self._resolved_auto_start(True), timeout)

    def write_one_sample_port_uint16(self, data, timeout=10):
        """
        Writes a single 16-bit unsigned integer sample to one or more
        digital output channels in a task.

        Use this method for devices with up to 16 lines per port.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of 16-bit
                unsigned integer samples to write to the task. Each
                element in the array corresponds to a channel in the
                task (in the order the channels were added).
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        self._verify_array(data, True, False)
        return _write_digital_u_16(
            self._handle, data, 1,
            self._resolved_auto_start(True), timeout)

    def write_one_sample_port_uint32(self, data, timeout=10):
        """
        Writes a single 32-bit unsigned integer sample to one or more
        digital output channels in a task.

        Use this method for devices with up to 32 lines per port.

        Args:
            data (numpy.ndarray): Contains a 1D NumPy array of 32-bit
                unsigned integer samples to write to the task. Each
                element in the array corresponds to a channel in the
                task (in the order the channels were added).
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for the method to write all samples.
                The default timeout is 10 seconds. Pass
                nidaqmx.constants.WAIT_INFINITELY to wait indefinitely,
                or 0 to attempt the write exactly once; on failure the
                method returns an error and the number of samples
                successfully written.
        """
        self._verify_array(data, True, False)
        return _write_digital_u_32(
            self._handle, data, 1,
            self._resolved_auto_start(True), timeout)
| 46.76983
| 79
| 0.627765
| 8,904
| 66,039
| 4.550876
| 0.032794
| 0.037092
| 0.020533
| 0.022211
| 0.927889
| 0.918117
| 0.911626
| 0.898571
| 0.894277
| 0.88791
| 0
| 0.00895
| 0.335044
| 66,039
| 1,411
| 80
| 46.802977
| 0.913807
| 0.654371
| 0
| 0.622896
| 0
| 0
| 0.029737
| 0.006354
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131313
| false
| 0.010101
| 0.026936
| 0
| 0.299663
| 0.006734
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48e432c7b34ae711c921fc1d92892e78a76c8b32
| 1,357
|
py
|
Python
|
tvof/cms/migrations/0010_auto_20180509_1800.py
|
kingsdigitallab/tvof-django
|
12cb0aec4e155345a13602c7d7dfd0882ec92129
|
[
"MIT"
] | null | null | null |
tvof/cms/migrations/0010_auto_20180509_1800.py
|
kingsdigitallab/tvof-django
|
12cb0aec4e155345a13602c7d7dfd0882ec92129
|
[
"MIT"
] | 33
|
2019-12-04T22:37:50.000Z
|
2022-02-10T07:15:35.000Z
|
tvof/cms/migrations/0010_auto_20180509_1800.py
|
kingsdigitallab/tvof-django
|
12cb0aec4e155345a13602c7d7dfd0882ec92129
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-05-09 17:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a French page title (``title_fr``) to each page model."""

    dependencies = [
        ('cms', '0009_auto_20180509_1742'),
    ]

    # FIX: the auto-generated (Python 2 era) migration used bytes
    # literals (``b''`` / ``b'Title'``) for ``default`` and
    # ``verbose_name``; under Python 3 a CharField default/label must
    # be ``str``, not ``bytes``. The four AddField operations are
    # otherwise identical, so they are generated from one template.
    operations = [
        migrations.AddField(
            model_name=model_name,
            name='title_fr',
            field=models.CharField(
                blank=True, default='',
                help_text="The page title as you'd like it to be seen by the public",
                max_length=255, verbose_name='Title'),
        )
        for model_name in ('blogpost', 'homepage', 'indexpage', 'richtextpage')
    ]
| 37.694444
| 169
| 0.617539
| 184
| 1,357
| 4.429348
| 0.331522
| 0.088344
| 0.112883
| 0.132515
| 0.727607
| 0.727607
| 0.727607
| 0.727607
| 0.727607
| 0.727607
| 0
| 0.045771
| 0.259396
| 1,357
| 35
| 170
| 38.771429
| 0.765174
| 0.050847
| 0
| 0.592593
| 1
| 0
| 0.263813
| 0.017899
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48e533184406a6704ec35d826a1d05528867b4dd
| 5,777
|
py
|
Python
|
tests/test_engine/test_queries/test_queryop_array_elemMatch.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_array_elemMatch.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_array_elemMatch.py
|
gitter-badger/MontyDB
|
849d03dc2cfed35739481e9acb1ff0bd8095c91b
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from montydb.errors import OperationFailure
def test_qop_elemMatch_1(monty_find, mongo_find):
    """$elemMatch with a bare comparison operator on scalar arrays."""
    documents = [
        {"a": [3, 2, 1]},
        {"a": [4, 5]}
    ]
    query = {"a": {"$elemMatch": {"$eq": 1}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 1
    assert monty_cursor.count() == mongo_cursor.count()
    assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_2(monty_find, mongo_find):
    """$elemMatch with a plain field condition on embedded documents."""
    documents = [
        {"a": [{"b": 1}, {"b": 2}]},
        {"a": [{"b": 3}, {"b": 4}]},
    ]
    query = {"a": {"$elemMatch": {"b": 1}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 1
    assert monty_cursor.count() == mongo_cursor.count()
    assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_3(monty_find, mongo_find):
    """$elemMatch on a positional path ("a.0") pointing at a document
    (not an array) matches nothing."""
    documents = [
        {"a": [{"b": [10, 11]}, {"b": 2}]},
        {"a": [{"b": [20, 21]}, {"b": 4}]},
    ]
    query = {"a.0": {"$elemMatch": {"b": {"$gt": 20}}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 0
    assert monty_cursor.count() == mongo_cursor.count()
def test_qop_elemMatch_4(monty_find, mongo_find):
    """$elemMatch on a path resolving to a scalar matches nothing."""
    documents = [
        {"a": [{"b": 1}, {"b": 2}]},
        {"a": [{"b": 3}, {"b": 4}]},
    ]
    query = {"a.0.b": {"$elemMatch": {"$eq": 1}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 0
    assert monty_cursor.count() == mongo_cursor.count()
def test_qop_elemMatch_5(monty_find, mongo_find):
    """$elemMatch on a path resolving to an array does match."""
    documents = [
        {"a": [{"b": [1]}, {"b": 2}]},
        {"a": [{"b": 3}, {"b": 4}]},
    ]
    query = {"a.0.b": {"$elemMatch": {"$eq": 1}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 1
    assert monty_cursor.count() == mongo_cursor.count()
    assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_6(monty_find, mongo_find):
    """Both $elemMatch conditions must hold for a single element."""
    documents = [
        {"a": [75, 82]},
        {"a": [75, 88]},
    ]
    query = {"a": {"$elemMatch": {"$gte": 80, "$lt": 85}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 1
    assert monty_cursor.count() == mongo_cursor.count()
    assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_7(monty_find, mongo_find):
    """Multiple field conditions must hold for one embedded document."""
    documents = [
        {"a": [{"b": "x", "c": 9}, {"b": "z", "c": 8}]},
        {"a": [{"b": "x", "c": 8}, {"b": "z", "c": 6}]},
    ]
    query = {"a": {"$elemMatch": {"b": "z", "c": {"$gte": 8}}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 1
    assert monty_cursor.count() == mongo_cursor.count()
    assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_8(monty_find, mongo_find):
    """$or nested inside $elemMatch over embedded documents."""
    documents = [
        {"a": [{"b": "x", "c": 9}, {"b": "z", "c": 8}]},
        {"a": [{"b": "x", "c": 8}, {"b": "z", "c": 6}]},
        {"a": [{"b": "y", "c": 8}, {"b": "z", "c": 7}]},
    ]
    query = {"a": {"$elemMatch": {"$or": [{"b": "x"}, {"c": 6}]}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 2
    assert monty_cursor.count() == mongo_cursor.count()
    for _ in range(2):
        assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_9(monty_find, mongo_find):
    """Numeric-string keys inside $elemMatch address nested array
    positions."""
    documents = [
        {"a": [[[1, 2], True], [[1, 2], True]]},
        {"a": [[[1, 0], True], [[1, 3], False]]},
    ]
    query = {"a": {"$elemMatch": {"$or": [{"0": [1, 0]}, {"1": False}]}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 1
    assert monty_cursor.count() == mongo_cursor.count()
    assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_10(monty_find, mongo_find):
    """Numeric-string keys match both array positions and literal
    document keys ("0"/"1")."""
    documents = [
        {"a": [[[1, 2], True], [[1, 2], True], {"0": [1, 0], "1": False}]},
        {"a": [[[1, 2], True], [[1, 2], True], {"0": [1, 0]}]},
        {"a": [[[1, 0], True], [[1, 3], False]]},
    ]
    query = {"a": {"$elemMatch": {"$or": [{"0": [1, 0]}, {"1": False}]}}}
    monty_cursor = monty_find(documents, query)
    mongo_cursor = mongo_find(documents, query)
    assert mongo_cursor.count() == 3
    assert monty_cursor.count() == mongo_cursor.count()
    for _ in range(3):
        assert next(mongo_cursor) == next(monty_cursor)
def test_qop_elemMatch_11(monty_find, mongo_find):
docs = [
{"a": [{"b": [10, 11]}, {"b": 2}]}, # won't get picked
{"a": [[{"b": [10, 11]}], {"b": 2}]},
{"a": [[{"b": [20, 21]}], {"b": 4}]},
]
spec = {"a.0": {"$elemMatch": {"$and": [{"b": [10, 11]}]}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_elemMatch_12(monty_find, mongo_find):
docs = [
{"a": [[{"b": [10, 11]}]]},
{"a": [[{"b": [20, 21]}]]},
]
spec = {"a.0.b": {"$elemMatch": {"$and": [{"0": 10}]}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_elemMatch_13(monty_find, mongo_find):
docs = [
{"a": [[{"b": [[10], 11]}]]},
{"a": [[{"b": [[20], 21]}]]},
]
spec = {"a.0.b": {"$elemMatch": {"$and": [{"0": 10}]}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(mongo_c) == next(monty_c)
def test_qop_elemMatch_14(monty_find, mongo_find):
    """A non-document operand for $elemMatch is rejected with OperationFailure."""
    documents = [
        {"a": [3, 2, 1]}
    ]
    query = {"a": {"$elemMatch": True}}  # $elemMatch needs an Object
    monty_cursor = monty_find(documents, query)
    with pytest.raises(OperationFailure):
        next(monty_cursor)
| 27.122066
| 75
| 0.508395
| 841
| 5,777
| 3.273484
| 0.079667
| 0.106793
| 0.127497
| 0.096622
| 0.892481
| 0.888485
| 0.871776
| 0.871776
| 0.871776
| 0.852888
| 0
| 0.040284
| 0.243725
| 5,777
| 212
| 76
| 27.25
| 0.589837
| 0.007443
| 0
| 0.606452
| 0
| 0
| 0.056195
| 0
| 0
| 0
| 0
| 0
| 0.232258
| 1
| 0.090323
| false
| 0
| 0.012903
| 0
| 0.103226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48ee220a13f6aadc42f46f04a25df773694896f9
| 131
|
py
|
Python
|
comingsoon/views.py
|
epool/giftxme
|
22bedb03772fcfcb71762863d40c96b113e2ac32
|
[
"Apache-2.0"
] | 1
|
2017-09-22T03:03:41.000Z
|
2017-09-22T03:03:41.000Z
|
comingsoon/views.py
|
epool/giftxme
|
22bedb03772fcfcb71762863d40c96b113e2ac32
|
[
"Apache-2.0"
] | null | null | null |
comingsoon/views.py
|
epool/giftxme
|
22bedb03772fcfcb71762863d40c96b113e2ac32
|
[
"Apache-2.0"
] | null | null | null |
# Create your views here.
from django.shortcuts import render
def index(request):
    """Render the static 'coming soon' landing page."""
    template_name = 'comingsoon/index.html'
    return render(request, template_name)
| 26.2
| 48
| 0.78626
| 18
| 131
| 5.722222
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114504
| 131
| 5
| 48
| 26.2
| 0.887931
| 0.175573
| 0
| 0
| 0
| 0
| 0.196262
| 0.196262
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
48fb246a27aee50146c1cff58122373bfe75cbd5
| 35
|
py
|
Python
|
pydscpack/__init__.py
|
aligirayhanozbay/pydscpack
|
48d1df0775e4b063cf387b3884b8b463b3660e89
|
[
"BSD-3-Clause"
] | null | null | null |
pydscpack/__init__.py
|
aligirayhanozbay/pydscpack
|
48d1df0775e4b063cf387b3884b8b463b3660e89
|
[
"BSD-3-Clause"
] | null | null | null |
pydscpack/__init__.py
|
aligirayhanozbay/pydscpack
|
48d1df0775e4b063cf387b3884b8b463b3660e89
|
[
"BSD-3-Clause"
] | null | null | null |
from .AnnulusMap import AnnulusMap
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d2afbc634a494371c891e31e0b2a0369fa73a7d8
| 9,017
|
py
|
Python
|
tests/layer_tests/onnx_tests/test_squeeze.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tests/layer_tests/onnx_tests/test_squeeze.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tests/layer_tests/onnx_tests/test_squeeze.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
class TestSqueeze(Caffe2OnnxLayerTest):
    """Layer tests for the ONNX Squeeze operator.

    Squeeze removes the size-1 dimensions listed in ``axes``; the converted
    IR is expected to express it as a Reshape.
    """

    def create_squeeze_net(self, axes, input_shape, output_shape, ir_version):
        """Build an ONNX model with a single Squeeze node.

        ONNX net                                 IR net

        Input->Squeeze(axes=0)->Output    =>     Input->Reshape

        :param axes: size-1 axes to remove.
        :param input_shape: shape fed to the Squeeze node.
        :param output_shape: expected squeezed shape.
        :param ir_version: target IR version (reference net not built here).
        :return: tuple (onnx model, reference IR net or None).
        """
        # onnx is imported lazily so test collection works without it installed.
        import onnx
        from onnx import helper
        from onnx import TensorProto

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

        node_squeeze_def = onnx.helper.make_node(
            'Squeeze',
            inputs=['input'],
            outputs=['output'],
            axes=axes
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_squeeze_def],
            'test_squeeze_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_squeeze_model')

        #
        #   Create reference IR net
        #   Please, specify 'type': 'Input' for input node
        #   Moreover, do not forget to validate ALL layer attributes!!!
        #
        ref_net = None

        return onnx_net, ref_net

    def create_squeeze_net_const(self, axes, input_shape, output_shape, ir_version):
        """Build an ONNX model that squeezes a constant, then concats it with the input.

        ONNX net                                          IR net

        Input->Concat(+squeezed const)->Output    =>      Input->Concat(+const)

        :param axes: size-1 axes to remove from the constant.
        :param input_shape: shape of the constant before squeezing.
        :param output_shape: shape of the input and of the squeezed constant.
        :param ir_version: target IR version (reference net not built here).
        :return: tuple (onnx model, reference IR net or None).
        """
        # onnx/numpy imported lazily so test collection works without them installed.
        import onnx
        from onnx import helper
        from onnx import TensorProto
        import numpy as np

        concat_axis = 0
        concat_output_shape = output_shape.copy()
        concat_output_shape[concat_axis] *= 2

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, output_shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, concat_output_shape)

        const_number = np.prod(input_shape)
        # np.float64 instead of np.float: the np.float alias was deprecated in
        # NumPy 1.20 and removed in 1.24 (it aliased builtin float == float64).
        constant = np.random.randint(-127, 127, const_number).astype(np.float64)
        constant = np.reshape(constant, input_shape)

        node_const_def = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['const1'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=constant.shape,
                vals=constant.flatten(),
            ),
        )

        node_squeeze_def = onnx.helper.make_node(
            'Squeeze',
            inputs=['const1'],
            outputs=['squeeze1'],
            axes=axes
        )

        node_concat_def = onnx.helper.make_node(
            'Concat',
            inputs=['input', 'squeeze1'],
            outputs=['output'],
            axis=concat_axis
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_const_def, node_squeeze_def, node_concat_def],
            'test_squeeze_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_squeeze_model')

        #
        #   Create reference IR net
        #   Please, specify 'type': 'Input' for input node
        #   Moreover, do not forget to validate ALL layer attributes!!!
        #
        ref_net = None

        return onnx_net, ref_net

    # Axis combinations covering every placement of size-1 dims in 5D shapes.
    test_data_5D = [
        dict(axes=[0], input_shape=[1, 2, 3, 10, 10], output_shape=[2, 3, 10, 10]),
        dict(axes=[1], input_shape=[2, 1, 3, 10, 10], output_shape=[2, 3, 10, 10]),
        dict(axes=[2], input_shape=[2, 3, 1, 10, 10], output_shape=[2, 3, 10, 10]),
        dict(axes=[3], input_shape=[2, 3, 10, 1, 10], output_shape=[2, 3, 10, 10]),
        dict(axes=[4], input_shape=[2, 3, 10, 10, 1], output_shape=[2, 3, 10, 10]),
        dict(axes=[0, 1], input_shape=[1, 1, 3, 10, 10], output_shape=[3, 10, 10]),
        dict(axes=[0, 2], input_shape=[1, 3, 1, 10, 10], output_shape=[3, 10, 10]),
        dict(axes=[0, 3], input_shape=[1, 3, 10, 1, 10], output_shape=[3, 10, 10]),
        dict(axes=[0, 4], input_shape=[1, 3, 10, 10, 1], output_shape=[3, 10, 10]),
        dict(axes=[1, 2], input_shape=[3, 1, 1, 10, 10], output_shape=[3, 10, 10]),
        dict(axes=[1, 3], input_shape=[3, 1, 10, 1, 10], output_shape=[3, 10, 10]),
        dict(axes=[1, 4], input_shape=[3, 1, 10, 10, 1], output_shape=[3, 10, 10]),
        dict(axes=[2, 3], input_shape=[3, 10, 1, 1, 10], output_shape=[3, 10, 10]),
        dict(axes=[2, 4], input_shape=[3, 10, 1, 10, 1], output_shape=[3, 10, 10]),
        dict(axes=[3, 4], input_shape=[3, 10, 10, 1, 1], output_shape=[3, 10, 10]),
        dict(axes=[0, 1, 2], input_shape=[1, 1, 1, 10, 10], output_shape=[10, 10]),
        dict(axes=[0, 1, 3], input_shape=[1, 1, 10, 1, 10], output_shape=[10, 10]),
        dict(axes=[0, 1, 4], input_shape=[1, 1, 10, 10, 1], output_shape=[10, 10]),
        dict(axes=[0, 2, 3], input_shape=[1, 10, 1, 1, 10], output_shape=[10, 10]),
        dict(axes=[0, 2, 4], input_shape=[1, 10, 1, 10, 1], output_shape=[10, 10]),
        dict(axes=[0, 3, 4], input_shape=[1, 10, 10, 1, 1], output_shape=[10, 10]),
        dict(axes=[1, 2, 3], input_shape=[10, 1, 1, 1, 10], output_shape=[10, 10]),
        dict(axes=[1, 2, 4], input_shape=[10, 1, 1, 10, 1], output_shape=[10, 10]),
        dict(axes=[1, 3, 4], input_shape=[10, 1, 10, 1, 1], output_shape=[10, 10]),
        dict(axes=[2, 3, 4], input_shape=[10, 10, 1, 1, 1], output_shape=[10, 10])]

    # Axis combinations covering every placement of size-1 dims in 4D shapes.
    test_data_4D = [
        dict(axes=[0], input_shape=[1, 3, 10, 10], output_shape=[3, 10, 10]),
        dict(axes=[1], input_shape=[3, 1, 10, 10], output_shape=[3, 10, 10]),
        dict(axes=[2], input_shape=[3, 10, 1, 10], output_shape=[3, 10, 10]),
        dict(axes=[3], input_shape=[3, 10, 10, 1], output_shape=[3, 10, 10]),
        dict(axes=[0, 1], input_shape=[1, 1, 10, 10], output_shape=[10, 10]),
        dict(axes=[0, 2], input_shape=[1, 10, 1, 10], output_shape=[10, 10]),
        dict(axes=[0, 3], input_shape=[1, 10, 10, 1], output_shape=[10, 10]),
        dict(axes=[1, 2], input_shape=[10, 1, 1, 10], output_shape=[10, 10]),
        dict(axes=[1, 3], input_shape=[10, 1, 10, 1], output_shape=[10, 10]),
        dict(axes=[2, 3], input_shape=[10, 10, 1, 1], output_shape=[10, 10])]

    # Axis combinations covering every placement of size-1 dims in 3D shapes.
    test_data_3D = [
        dict(axes=[0], input_shape=[1, 10, 10], output_shape=[10, 10]),
        dict(axes=[1], input_shape=[10, 1, 10], output_shape=[10, 10]),
        dict(axes=[2], input_shape=[10, 10, 1], output_shape=[10, 10])]

    @pytest.mark.parametrize("params", test_data_5D)
    @pytest.mark.nightly
    def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
        self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
                   ir_version,
                   temp_dir=temp_dir, api_2=api_2)

    @pytest.mark.parametrize("params", test_data_4D)
    @pytest.mark.nightly
    def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
        self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
                   ir_version,
                   temp_dir=temp_dir, api_2=api_2)

    @pytest.mark.parametrize("params", test_data_3D)
    @pytest.mark.nightly
    def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
        self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
                   ir_version,
                   temp_dir=temp_dir, api_2=api_2)

    @pytest.mark.parametrize("params", test_data_5D)
    @pytest.mark.nightly
    def test_squeeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
        self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
                   precision, ir_version,
                   temp_dir=temp_dir, api_2=api_2)

    @pytest.mark.parametrize("params", test_data_4D)
    @pytest.mark.nightly
    def test_squeeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
        self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
                   precision, ir_version,
                   temp_dir=temp_dir, api_2=api_2)

    @pytest.mark.parametrize("params", test_data_3D)
    @pytest.mark.nightly
    def test_squeeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
        self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
                   precision, ir_version,
                   temp_dir=temp_dir, api_2=api_2)
| 41.552995
| 99
| 0.582123
| 1,271
| 9,017
| 3.898505
| 0.083399
| 0.048436
| 0.056509
| 0.084763
| 0.830474
| 0.810293
| 0.769324
| 0.758022
| 0.716852
| 0.620989
| 0
| 0.084725
| 0.269602
| 9,017
| 216
| 100
| 41.74537
| 0.667628
| 0.082289
| 0
| 0.39726
| 0
| 0
| 0.026918
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054795
| false
| 0
| 0.061644
| 0
| 0.157534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2b03aaf7ed4aa2b4ebfddafc3d05531cce67e1f
| 39
|
py
|
Python
|
__init__.py
|
afernandezrti/rticonnextdds-connector-py
|
56cf102ccbf7ba9e01e56fce55439bef84e888b7
|
[
"CNRI-Python"
] | 24
|
2019-04-22T15:40:19.000Z
|
2022-01-17T06:39:38.000Z
|
__init__.py
|
afernandezrti/rticonnextdds-connector-py
|
56cf102ccbf7ba9e01e56fce55439bef84e888b7
|
[
"CNRI-Python"
] | 46
|
2019-04-04T14:59:45.000Z
|
2022-03-22T06:57:37.000Z
|
__init__.py
|
afernandezrti/rticonnextdds-connector-py
|
56cf102ccbf7ba9e01e56fce55439bef84e888b7
|
[
"CNRI-Python"
] | 17
|
2018-10-03T20:42:36.000Z
|
2022-01-06T02:36:32.000Z
|
from .rticonnextdds_connector import *
| 19.5
| 38
| 0.846154
| 4
| 39
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96001b1be26377bb3eafa3b47c330696425cbeed
| 13,952
|
py
|
Python
|
tests/api/v3_1_0/test_hotspot_portal.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 36
|
2021-05-18T16:24:19.000Z
|
2022-03-05T13:44:41.000Z
|
tests/api/v3_1_0/test_hotspot_portal.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 15
|
2021-06-08T19:03:37.000Z
|
2022-02-25T14:47:33.000Z
|
tests/api/v3_1_0/test_hotspot_portal.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 6
|
2021-06-10T09:32:01.000Z
|
2022-01-12T08:34:39.000Z
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI hotspot_portal API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_hotspot_portal_by_id(json_schema_validate, obj):
    """Validate a get_hotspot_portal_by_id response envelope and its JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_6cbcecf65a0155fcad602d3ac16531a7_v3_1_0').validate(obj.response)
    return True


def get_hotspot_portal_by_id(api):
    """Call the endpoint with a placeholder id."""
    return api.hotspot_portal.get_hotspot_portal_by_id(
        id='string'
    )


@pytest.mark.hotspot_portal
def test_get_hotspot_portal_by_id(api, validator):
    try:
        assert is_valid_get_hotspot_portal_by_id(
            validator,
            get_hotspot_portal_by_id(api)
        )
    except Exception as exc:
        # Failures must be one of the expected, documented error types.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def get_hotspot_portal_by_id_default(api):
    """Call the endpoint relying on default argument handling."""
    return api.hotspot_portal.get_hotspot_portal_by_id(
        id='string'
    )


@pytest.mark.hotspot_portal
def test_get_hotspot_portal_by_id_default(api, validator):
    try:
        assert is_valid_get_hotspot_portal_by_id(
            validator,
            get_hotspot_portal_by_id_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_update_hotspot_portal_by_id(json_schema_validate, obj):
    """Validate an update_hotspot_portal_by_id response envelope and its JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_0ae4af25df565334b20a24c4878b68e4_v3_1_0').validate(obj.response)
    return True


def update_hotspot_portal_by_id(api):
    """Call the update endpoint with fully-populated placeholder data."""
    return api.hotspot_portal.update_hotspot_portal_by_id(
        active_validation=False,
        customizations={'portalTheme': {'id': 'string', 'name': 'string', 'themeData': 'string'}, 'portalTweakSettings': {'bannerColor': 'string', 'bannerTextColor': 'string', 'pageBackgroundColor': 'string', 'pageLabelAndTextColor': 'string'}, 'language': {'viewLanguage': 'string'}, 'globalCustomizations': {'mobileLogoImage': {'data': 'string'}, 'desktopLogoImage': {'data': 'string'}, 'backgroundImage': {'data': 'string'}, 'bannerImage': {'data': 'string'}, 'bannerTitle': 'string', 'contactText': 'string', 'footerElement': 'string'}, 'pageCustomizations': {'data': [{'key': 'string', 'value': 'string'}]}},
        description='string',
        id='string',
        name='string',
        payload=None,
        portal_test_url='string',
        portal_type='string',
        settings={'portalSettings': {'httpsPort': 0, 'allowedInterfaces': ['string'], 'certificateGroupTag': 'string', 'endpointIdentityGroup': 'string', 'coaType': 'string', 'displayLang': 'string', 'fallbackLanguage': 'string', 'alwaysUsedLanguage': 'string'}, 'aupSettings': {'requireAccessCode': True, 'accessCode': 'string', 'includeAup': True, 'requireScrolling': True}, 'postAccessBannerSettings': {'includePostAccessBanner': True}, 'authSuccessSettings': {'successRedirect': 'string', 'redirectUrl': 'string'}, 'postLoginBannerSettings': {'includePostAccessBanner': True}, 'supportInfoSettings': {'includeSupportInfoPage': True, 'includeMacAddr': True, 'includeIpAddress': True, 'includeBrowserUserAgent': True, 'includePolicyServer': True, 'includeFailureCode': True, 'emptyFieldDisplay': 'string', 'defaultEmptyFieldValue': 'string'}}
    )


@pytest.mark.hotspot_portal
def test_update_hotspot_portal_by_id(api, validator):
    try:
        assert is_valid_update_hotspot_portal_by_id(
            validator,
            update_hotspot_portal_by_id(api)
        )
    except Exception as exc:
        # Failures must be one of the expected, documented error types.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def update_hotspot_portal_by_id_default(api):
    """Call the update endpoint with only the required id; other fields default to None."""
    return api.hotspot_portal.update_hotspot_portal_by_id(
        active_validation=False,
        id='string',
        customizations=None,
        description=None,
        name=None,
        payload=None,
        portal_test_url=None,
        portal_type=None,
        settings=None
    )


@pytest.mark.hotspot_portal
def test_update_hotspot_portal_by_id_default(api, validator):
    try:
        assert is_valid_update_hotspot_portal_by_id(
            validator,
            update_hotspot_portal_by_id_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_delete_hotspot_portal_by_id(json_schema_validate, obj):
    """Validate a delete_hotspot_portal_by_id response envelope and its JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_1a344d1c6f535789b7badbaa502e8d3b_v3_1_0').validate(obj.response)
    return True


def delete_hotspot_portal_by_id(api):
    """Call the delete endpoint with a placeholder id."""
    return api.hotspot_portal.delete_hotspot_portal_by_id(
        id='string'
    )


@pytest.mark.hotspot_portal
def test_delete_hotspot_portal_by_id(api, validator):
    try:
        assert is_valid_delete_hotspot_portal_by_id(
            validator,
            delete_hotspot_portal_by_id(api)
        )
    except Exception as exc:
        # Failures must be one of the expected, documented error types.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def delete_hotspot_portal_by_id_default(api):
    """Call the delete endpoint relying on default argument handling."""
    return api.hotspot_portal.delete_hotspot_portal_by_id(
        id='string'
    )


@pytest.mark.hotspot_portal
def test_delete_hotspot_portal_by_id_default(api, validator):
    try:
        assert is_valid_delete_hotspot_portal_by_id(
            validator,
            delete_hotspot_portal_by_id_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_get_hotspot_portal(json_schema_validate, obj):
    """Validate a get_hotspot_portal (list) response envelope and its JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_d912b1c21e2b5dca8b56332d3a8ad13d_v3_1_0').validate(obj.response)
    return True


def get_hotspot_portal(api):
    """Call the list endpoint with placeholder paging/filter/sort parameters."""
    return api.hotspot_portal.get_hotspot_portal(
        filter='value1,value2',
        filter_type='string',
        page=0,
        size=0,
        sortasc='string',
        sortdsc='string'
    )


@pytest.mark.hotspot_portal
def test_get_hotspot_portal(api, validator):
    try:
        assert is_valid_get_hotspot_portal(
            validator,
            get_hotspot_portal(api)
        )
    except Exception as exc:
        # Failures must be one of the expected, documented error types.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def get_hotspot_portal_default(api):
    """Call the list endpoint with every optional parameter left as None."""
    return api.hotspot_portal.get_hotspot_portal(
        filter=None,
        filter_type=None,
        page=None,
        size=None,
        sortasc=None,
        sortdsc=None
    )


@pytest.mark.hotspot_portal
def test_get_hotspot_portal_default(api, validator):
    try:
        assert is_valid_get_hotspot_portal(
            validator,
            get_hotspot_portal_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_create_hotspot_portal(json_schema_validate, obj):
    """Validate a create_hotspot_portal response envelope and its JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_0df78c9a3f72584dbd1c7b667b0e312f_v3_1_0').validate(obj.response)
    return True


def create_hotspot_portal(api):
    """Call the create endpoint with fully-populated placeholder data."""
    return api.hotspot_portal.create_hotspot_portal(
        active_validation=False,
        customizations={'portalTheme': {'id': 'string', 'name': 'string', 'themeData': 'string'}, 'portalTweakSettings': {'bannerColor': 'string', 'bannerTextColor': 'string', 'pageBackgroundColor': 'string', 'pageLabelAndTextColor': 'string'}, 'language': {'viewLanguage': 'string'}, 'globalCustomizations': {'mobileLogoImage': {'data': 'string'}, 'desktopLogoImage': {'data': 'string'}, 'backgroundImage': {'data': 'string'}, 'bannerImage': {'data': 'string'}, 'bannerTitle': 'string', 'contactText': 'string', 'footerElement': 'string'}, 'pageCustomizations': {'data': [{'key': 'string', 'value': 'string'}]}},
        description='string',
        name='string',
        payload=None,
        portal_test_url='string',
        portal_type='string',
        settings={'portalSettings': {'httpsPort': 0, 'allowedInterfaces': ['string'], 'certificateGroupTag': 'string', 'endpointIdentityGroup': 'string', 'coaType': 'string', 'displayLang': 'string', 'fallbackLanguage': 'string', 'alwaysUsedLanguage': 'string'}, 'aupSettings': {'requireAccessCode': True, 'accessCode': 'string', 'includeAup': True, 'requireScrolling': True}, 'postAccessBannerSettings': {'includePostAccessBanner': True}, 'authSuccessSettings': {'successRedirect': 'string', 'redirectUrl': 'string'}, 'postLoginBannerSettings': {'includePostAccessBanner': True}, 'supportInfoSettings': {'includeSupportInfoPage': True, 'includeMacAddr': True, 'includeIpAddress': True, 'includeBrowserUserAgent': True, 'includePolicyServer': True, 'includeFailureCode': True, 'emptyFieldDisplay': 'string', 'defaultEmptyFieldValue': 'string'}}
    )


@pytest.mark.hotspot_portal
def test_create_hotspot_portal(api, validator):
    try:
        assert is_valid_create_hotspot_portal(
            validator,
            create_hotspot_portal(api)
        )
    except Exception as exc:
        # Failures must be one of the expected, documented error types.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def create_hotspot_portal_default(api):
    """Call the create endpoint with every optional field left as None."""
    return api.hotspot_portal.create_hotspot_portal(
        active_validation=False,
        customizations=None,
        description=None,
        name=None,
        payload=None,
        portal_test_url=None,
        portal_type=None,
        settings=None
    )


@pytest.mark.hotspot_portal
def test_create_hotspot_portal_default(api, validator):
    try:
        assert is_valid_create_hotspot_portal(
            validator,
            create_hotspot_portal_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_get_version(json_schema_validate, obj):
    """Validate a get_version response envelope and its JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_91257d81be4f5a0486cc085499c19b1c_v3_1_0').validate(obj.response)
    return True


def get_version(api):
    """Call the version endpoint (takes no arguments)."""
    return api.hotspot_portal.get_version(
    )


@pytest.mark.hotspot_portal
def test_get_version(api, validator):
    try:
        assert is_valid_get_version(
            validator,
            get_version(api)
        )
    except Exception as exc:
        # Failures must be one of the expected, documented error types.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def get_version_default(api):
    """Call the version endpoint again; no defaults exist to vary."""
    return api.hotspot_portal.get_version(
    )


@pytest.mark.hotspot_portal
def test_get_version_default(api, validator):
    try:
        assert is_valid_get_version(
            validator,
            get_version_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
| 37.606469
| 844
| 0.707712
| 1,542
| 13,952
| 6.141375
| 0.156291
| 0.10982
| 0.05227
| 0.05924
| 0.834213
| 0.832946
| 0.831362
| 0.827772
| 0.824921
| 0.810665
| 0
| 0.013001
| 0.189579
| 13,952
| 370
| 845
| 37.708108
| 0.824533
| 0.083142
| 0
| 0.683099
| 0
| 0
| 0.20133
| 0.051779
| 0
| 0
| 0
| 0
| 0.126761
| 1
| 0.105634
| false
| 0
| 0.017606
| 0
| 0.207746
| 0.021127
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9620a38834ef8e8605baf91690ea5302de6f3247
| 21,005
|
py
|
Python
|
comport/department/views.py
|
isabella232/comport
|
117123862415261095a917ed7f2037c1f986b474
|
[
"BSD-3-Clause"
] | 35
|
2015-11-14T18:32:45.000Z
|
2022-01-23T15:15:05.000Z
|
comport/department/views.py
|
codeforamerica/comport
|
117123862415261095a917ed7f2037c1f986b474
|
[
"BSD-3-Clause"
] | 119
|
2015-11-20T22:45:34.000Z
|
2022-02-10T23:02:36.000Z
|
comport/department/views.py
|
isabella232/comport
|
117123862415261095a917ed7f2037c1f986b474
|
[
"BSD-3-Clause"
] | 19
|
2015-11-20T20:41:52.000Z
|
2022-01-26T04:12:34.000Z
|
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request, redirect, url_for, flash, Response, abort
from .models import Department, Extractor
from comport.data.models import DemographicValue, DenominatorValue
from flask.ext.login import login_required
from comport.decorators import admin_or_department_required, authorized_access_only
import uuid
import datetime
# All department views are registered under the /department URL prefix.
blueprint = Blueprint("department", __name__, url_prefix='/department',
                      static_folder="../static")

# <<<<<<<< ADMIN ENDPOINTS >>>>>>>>>>

@blueprint.route("/<int:department_id>")
@login_required
@admin_or_department_required()
def department_dashboard(department_id):
    """Render the admin dashboard for one department; 404 if the id is unknown."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    # Current month/year seed the dashboard's default reporting period.
    current_date = datetime.datetime.now()
    return render_template("department/dashboard.html", department=department, current_month=current_date.month, current_year=current_date.year)
@blueprint.route("/<int:department_id>/activate", methods=['POST'])
@login_required
@admin_or_department_required()
def activate_extractor(department_id):
    """Create extractor credentials for a department and display them once.

    NOTE(review): when the submit field is not 'Activate' the function falls
    through and implicitly returns None, which Flask treats as an error —
    confirm that path is unreachable from the UI.
    """
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    if request.method == 'POST':
        if request.form['submit'] == 'Activate':
            # A random UUID serves as the extractor account's password.
            password = str(uuid.uuid4())
            extractor, envs = Extractor.from_department_and_password(department=department, password=password)
            return render_template("department/extractorEnvs.html", department=department, envs=envs)


@blueprint.route("/<int:department_id>/start", methods=['POST'])
@login_required
@admin_or_department_required()
def start_extractor(department_id):
    """Set the month/year from which the department's extractor begins pulling data."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    if request.method == 'POST':
        if request.form['submit'] == 'Set':
            extractor = department.get_extractor()
            extractor.next_year = request.form["year"]
            extractor.next_month = request.form["month"]
            extractor.save()
            flash("Extractor start date set to {}/{}".format(extractor.next_month, extractor.next_year), "info")
    # Always land back on the dashboard after the update attempt.
    return redirect(url_for('department.department_dashboard', department_id=department.id))
# <<<<<<<< EDIT ENDPOINTS >>>>>>>>>>
# Each edit view renders the corresponding public page with editing=True.

@blueprint.route("/<int:department_id>/edit/ois")
@login_required
@admin_or_department_required()
def edit_ois(department_id):
    """Render the officer-involved-shootings page in editing mode."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/ois.html", department=department, chart_blocks=department.get_ois_blocks(), editing=True)


@blueprint.route("/<int:department_id>/edit/useofforce")
@login_required
@admin_or_department_required()
def edit_use_of_force(department_id):
    """Render the use-of-force page in editing mode."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/useofforce.html", department=department, chart_blocks=department.get_uof_blocks(), editing=True)


@blueprint.route("/<int:department_id>/edit/complaints")
@login_required
@admin_or_department_required()
def edit_complaints(department_id):
    """Render the complaints page in editing mode."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/complaints.html", department=department, chart_blocks=department.get_complaint_blocks(), editing=True)


@blueprint.route("/<int:department_id>/edit/pursuits")
@login_required
@admin_or_department_required()
def edit_pursuits(department_id):
    """Render the pursuits page in editing mode."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/pursuits.html", department=department, chart_blocks=department.get_pursuits_blocks(), editing=True)


@blueprint.route("/<int:department_id>/edit/assaultsonofficers")
@login_required
@admin_or_department_required()
def edit_assaultsonofficers(department_id):
    """Render the assaults-on-officers page in editing mode."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/assaults.html", department=department, chart_blocks=department.get_assaults_blocks(), editing=True)


@blueprint.route("/<int:department_id>/edit/demographics")
@login_required
@admin_or_department_required()
def edit_demographics(department_id):
    """Edit race/count demographic values for the department and its city."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template(
        "department/demographics.html",
        department=department,
        department_values=department.get_raw_department_demographics(),
        city_values=department.get_raw_city_demographics())
@blueprint.route("/<int:department_id>/demographicValue/create", methods=["POST"])
@login_required
@admin_or_department_required()
def new_demographic_row(department_id):
department = Department.get_by_id(department_id)
if not department:
abort(404)
DemographicValue.create(
department_id=department_id,
race=request.form["race"],
count=int(request.form["count"]),
department_value=request.form["department_or_city"] == "department")
return redirect(url_for(
'department.edit_demographics', department_id=department_id
))
@blueprint.route("/<int:department_id>/demographicValue/<int:value_id>/delete", methods=["POST"])
@login_required
@admin_or_department_required()
def delete_demographic_row(department_id, value_id):
    """Delete a demographic value row, then return to the demographics editor.

    404s when the department or the value does not exist, or when the value
    does not belong to this department — previously a user authorized for one
    department could delete another department's rows by guessing value ids.
    """
    department = Department.get_by_id(department_id)
    value = DemographicValue.get_by_id(value_id)
    if not department or not value or value.department_id != department_id:
        abort(404)
    value.delete()
    return redirect(url_for(
        'department.edit_demographics', department_id=department_id
    ))
@blueprint.route("/<int:department_id>/edit/denominators")
@login_required
@admin_or_department_required()
def edit_denominators(department_id):
    """Render the denominator-values (officers out on service) editor."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template(
        "department/denominators.html",
        department=department,
        denominator_values=department.denominator_values
    )
@blueprint.route("/<int:department_id>/denominatorValue/create", methods=["POST"])
@login_required
@admin_or_department_required()
def new_denominator_row(department_id):
    """Create a denominator row for a given month/year.

    Expects integer form fields "month", "year" and "officersOutOnService".
    Non-integer input now yields a 400 client error instead of an unhandled
    ValueError (500).
    """
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    try:
        month = int(request.form["month"])
        year = int(request.form["year"])
        officers_out = int(request.form["officersOutOnService"])
    except ValueError:
        # Malformed user input is a client error, not a server error.
        abort(400)
    DenominatorValue.create(
        department_id=department_id,
        month=month,
        year=year,
        officers_out_on_service=officers_out
    )
    return redirect(url_for(
        'department.edit_denominators', department_id=department_id
    ))
@blueprint.route("/<int:department_id>/denominatorValue/<int:value_id>/delete", methods=["POST"])
@login_required
@admin_or_department_required()
def delete_denominator_row(department_id, value_id):
    """Delete a denominator row, then return to the denominators editor.

    404s when the department or the value does not exist, or when the value
    does not belong to this department — previously a user authorized for one
    department could delete another department's rows by guessing value ids.
    """
    department = Department.get_by_id(department_id)
    value = DenominatorValue.get_by_id(value_id)
    if not department or not value or value.department_id != department_id:
        abort(404)
    value.delete()
    return redirect(url_for(
        'department.edit_denominators', department_id=department_id
    ))
# NOTE(review): this route accepts POST but the handler only renders the
# page — confirm whether POST support is still needed here.
@blueprint.route("/<int:department_id>/edit/index", methods=["GET", "POST"])
@login_required
@admin_or_department_required()
def edit_index(department_id):
    """Render the department site's introduction/index page in edit mode."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/index.html", department=department, chart_blocks=department.get_introduction_blocks(), editing=True)
# <<<<<<<< PREVIEW ENDPOINTS >>>>>>>>>>
# Preview views: render the same templates as the edit views but with
# editing=False, so authorized users can see the page as it will publish.
@blueprint.route("/<int:department_id>/preview/ois")
@login_required
@admin_or_department_required()
def preview_ois(department_id):
    """Preview the officer-involved-shootings page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/ois.html", department=department, chart_blocks=department.get_ois_blocks(), editing=False)


@blueprint.route("/<int:department_id>/preview/useofforce")
@login_required
@admin_or_department_required()
def preview_use_of_force(department_id):
    """Preview the use-of-force page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/useofforce.html", department=department, chart_blocks=department.get_uof_blocks(), editing=False)


@blueprint.route("/<int:department_id>/preview/complaints")
@login_required
@admin_or_department_required()
def preview_complaints(department_id):
    """Preview the citizen-complaints page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/complaints.html", department=department, chart_blocks=department.get_complaint_blocks(), editing=False)


@blueprint.route("/<int:department_id>/preview/pursuits")
@login_required
@admin_or_department_required()
def preview_pursuits(department_id):
    """Preview the vehicle-pursuits page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/pursuits.html", department=department, chart_blocks=department.get_pursuits_blocks(), editing=False)


@blueprint.route("/<int:department_id>/preview/assaultsonofficers")
@login_required
@admin_or_department_required()
def preview_assaults(department_id):
    """Preview the assaults-on-officers page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/assaults.html", department=department, chart_blocks=department.get_assaults_blocks(), editing=False)


@blueprint.route("/<int:department_id>/preview/index")
@login_required
@admin_or_department_required()
def preview_index(department_id):
    """Preview the introduction/index page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/index.html", chart_blocks=department.get_introduction_blocks(), department=department, editing=False)
# <<<<<<<< SCHEMA ENDPOINTS >>>>>>>>>>
# Schema pages document each dataset's fields; each has an edit view
# (editing=True) and a preview view (editing=False).
@blueprint.route('/<int:department_id>/preview/schema/complaints')
@login_required
@admin_or_department_required()
def complaints_schema_preview(department_id):
    """Preview the complaints dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/complaints.html", department=department, chart_blocks=department.get_complaint_schema_blocks(), editing=False)


@blueprint.route('/<int:department_id>/edit/schema/complaints')
@login_required
@admin_or_department_required()
def complaints_schema_edit(department_id):
    """Edit the complaints dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/complaints.html", department=department, chart_blocks=department.get_complaint_schema_blocks(), editing=True)


@blueprint.route('/<int:department_id>/preview/schema/useofforce')
@login_required
@admin_or_department_required()
def useofforce_schema_preview(department_id):
    """Preview the use-of-force dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/useofforce.html", department=department, chart_blocks=department.get_uof_schema_blocks(), editing=False)


@blueprint.route('/<int:department_id>/edit/schema/useofforce')
@login_required
@admin_or_department_required()
def useofforce_schema_edit(department_id):
    """Edit the use-of-force dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/useofforce.html", department=department, chart_blocks=department.get_uof_schema_blocks(), editing=True)


@blueprint.route('/<int:department_id>/edit/schema/ois')
@login_required
@admin_or_department_required()
def ois_schema_edit(department_id):
    """Edit the officer-involved-shootings dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/ois.html", department=department, chart_blocks=department.get_ois_schema_blocks(), editing=True)


@blueprint.route('/<int:department_id>/preview/schema/ois')
@login_required
@admin_or_department_required()
def ois_schema_preview(department_id):
    """Preview the officer-involved-shootings dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/ois.html", department=department, chart_blocks=department.get_ois_schema_blocks(), editing=False)


@blueprint.route('/<int:department_id>/preview/schema/pursuits')
@login_required
@admin_or_department_required()
def pursuits_schema_preview(department_id):
    """Preview the pursuits dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/pursuits.html", department=department, chart_blocks=department.get_pursuits_schema_blocks(), editing=False)


@blueprint.route('/<int:department_id>/edit/schema/pursuits')
@login_required
@admin_or_department_required()
def pursuits_schema_edit(department_id):
    """Edit the pursuits dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/pursuits.html", department=department, chart_blocks=department.get_pursuits_schema_blocks(), editing=True)


@blueprint.route('/<int:department_id>/preview/schema/assaultsonofficers')
@login_required
@admin_or_department_required()
def assaults_schema_preview(department_id):
    """Preview the assaults-on-officers dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/assaults.html", department=department, chart_blocks=department.get_assaults_schema_blocks(), editing=False)


@blueprint.route('/<int:department_id>/edit/schema/assaultsonofficers')
@login_required
@admin_or_department_required()
def assaults_schema_edit(department_id):
    """Edit the assaults-on-officers dataset schema page."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return render_template("department/site/schema/assaults.html", department=department, chart_blocks=department.get_assaults_schema_blocks(), editing=True)
# <<<<<<<< DATA ENDPOINTS >>>>>>>>>>
# Raw CSV downloads. These are gated by @authorized_access_only (per-dataset
# where a dataset name is given) rather than the login/admin decorators.
@blueprint.route('/<int:department_id>/uof.csv')
@authorized_access_only(dataset="use_of_force_incidents")
def use_of_force_csv(department_id):
    """Download the department's use-of-force incidents as CSV."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return Response(department.get_uof_csv(), mimetype="text/csv")


@blueprint.route('/<int:department_id>/complaints.csv')
@authorized_access_only(dataset="citizen_complaints")
def complaints_csv(department_id):
    """Download the department's citizen complaints as CSV."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return Response(department.get_complaint_csv(), mimetype="text/csv")


@blueprint.route('/<int:department_id>/pursuits.csv')
@authorized_access_only(dataset="pursuits")
def pursuits_csv(department_id):
    """Download the department's vehicle pursuits as CSV."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return Response(department.get_pursuits_csv(), mimetype="text/csv")


@blueprint.route('/<int:department_id>/assaultsonofficers.csv')
@authorized_access_only(dataset="assaults_on_officers")
def assaults_csv(department_id):
    """Download the department's assaults-on-officers data as CSV."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return Response(department.get_assaults_csv(), mimetype="text/csv")


@blueprint.route('/<int:department_id>/ois.csv')
@authorized_access_only(dataset="officer_involved_shootings")
def ois_csv(department_id):
    """Download the department's officer-involved shootings as CSV."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return Response(department.get_ois_csv(), mimetype="text/csv")


@blueprint.route('/<int:department_id>/officerCalls.csv')
@authorized_access_only()
def denominator_csv(department_id):
    """Download the department's denominator (officer calls) data as CSV."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return Response(department.get_denominator_csv(), mimetype="text/csv")


@blueprint.route('/<int:department_id>/demographics.csv')
@authorized_access_only()
def demographics_csv(department_id):
    """Download the department's demographics data as CSV."""
    department = Department.get_by_id(department_id)
    if not department:
        abort(404)
    return Response(department.get_demographic_csv(), mimetype="text/csv")
# <<<<<<<< PUBLIC ENDPOINTS >>>>>>>>>>
# Published views of a department's site, addressed by short name rather
# than numeric id, gated per-dataset by @authorized_access_only.


def _published_department_or_404(short_name):
    """Return the department matching `short_name` or abort with a 404.

    The lookup upper-cases its input (short names are presumably stored
    upper-case — this mirrors what every public view did inline before).
    """
    department = Department.query.filter_by(short_name=short_name.upper()).first()
    if not department:
        abort(404)
    return department


@blueprint.route("/<short_name>/")
@authorized_access_only()
def public_intro(short_name):
    """Public landing (introduction) page of a published department site."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/index.html", chart_blocks=department.get_introduction_blocks(), department=department, editing=False, published=True)


@blueprint.route("/<short_name>/complaints/")
@authorized_access_only(dataset="citizen_complaints")
def public_complaints(short_name):
    """Public citizen-complaints page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/complaints.html", department=department, chart_blocks=department.get_complaint_blocks(), editing=False, published=True)


@blueprint.route('/<short_name>/schema/complaints/')
@authorized_access_only(dataset="citizen_complaints")
def public_complaints_schema(short_name):
    """Public complaints schema page."""
    # editing=False added for consistency: every other public schema view
    # passes it explicitly.
    department = _published_department_or_404(short_name)
    return render_template("department/site/schema/complaints.html", department=department, chart_blocks=department.get_complaint_schema_blocks(), editing=False, published=True)


@blueprint.route("/<short_name>/pursuits/")
@authorized_access_only(dataset="pursuits")
def public_pursuits(short_name):
    """Public vehicle-pursuits page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/pursuits.html", department=department, chart_blocks=department.get_pursuits_blocks(), editing=False, published=True)


@blueprint.route('/<short_name>/schema/pursuits/')
@authorized_access_only(dataset="pursuits")
def public_pursuits_schema(short_name):
    """Public pursuits schema page."""
    # editing=False added for consistency with the sibling schema views.
    department = _published_department_or_404(short_name)
    return render_template("department/site/schema/pursuits.html", department=department, chart_blocks=department.get_pursuits_schema_blocks(), editing=False, published=True)


@blueprint.route("/<short_name>/assaultsonofficers/")
@authorized_access_only(dataset="assaults_on_officers")
def public_assaults(short_name):
    """Public assaults-on-officers page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/assaults.html", department=department, chart_blocks=department.get_assaults_blocks(), editing=False, published=True)


@blueprint.route('/<short_name>/schema/assaultsonofficers/')
@authorized_access_only(dataset="assaults_on_officers")
def public_assaults_schema(short_name):
    """Public assaults-on-officers schema page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/schema/assaults.html", department=department, chart_blocks=department.get_assaults_schema_blocks(), editing=False, published=True)


@blueprint.route("/<short_name>/useofforce/")
@authorized_access_only(dataset="use_of_force_incidents")
def public_uof(short_name):
    """Public use-of-force page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/useofforce.html", department=department, chart_blocks=department.get_uof_blocks(), editing=False, published=True)


@blueprint.route('/<short_name>/schema/useofforce/')
@authorized_access_only(dataset="use_of_force_incidents")
def public_uof_schema(short_name):
    """Public use-of-force schema page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/schema/useofforce.html", department=department, chart_blocks=department.get_uof_schema_blocks(), editing=False, published=True)


@blueprint.route("/<short_name>/officerinvolvedshootings/")
@authorized_access_only(dataset="officer_involved_shootings")
def public_ois(short_name):
    """Public officer-involved-shootings page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/ois.html", department=department, chart_blocks=department.get_ois_blocks(), editing=False, published=True)


@blueprint.route('/<short_name>/schema/officerinvolvedshootings/')
@authorized_access_only(dataset="officer_involved_shootings")
def public_ois_schema(short_name):
    """Public officer-involved-shootings schema page."""
    department = _published_department_or_404(short_name)
    return render_template("department/site/schema/ois.html", department=department, chart_blocks=department.get_ois_schema_blocks(), editing=False, published=True)
| 42.094188
| 174
| 0.768769
| 2,555
| 21,005
| 6.028963
| 0.052838
| 0.099714
| 0.047715
| 0.061023
| 0.881459
| 0.862049
| 0.841989
| 0.827577
| 0.765256
| 0.700792
| 0
| 0.007992
| 0.112402
| 21,005
| 498
| 175
| 42.178715
| 0.818226
| 0.011378
| 0
| 0.584507
| 0
| 0
| 0.176318
| 0.155506
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115023
| false
| 0.004695
| 0.016432
| 0
| 0.246479
| 0.119718
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
829573989fd9375f501363497c29204dc8eccee7
| 4,359
|
py
|
Python
|
ngsutils/gtf/t/test_junctions.py
|
bgruening/ngsutils
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
[
"BSD-3-Clause"
] | 57
|
2015-03-09T01:26:45.000Z
|
2022-02-22T07:26:01.000Z
|
ngsutils/gtf/t/test_junctions.py
|
bgruening/ngsutils
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
[
"BSD-3-Clause"
] | 33
|
2015-02-03T23:24:46.000Z
|
2022-03-16T20:08:10.000Z
|
ngsutils/gtf/t/test_junctions.py
|
bgruening/ngsutils
|
417e90dc1918fb553dd84990f2c54bd8cea8f44d
|
[
"BSD-3-Clause"
] | 33
|
2015-01-18T16:47:47.000Z
|
2022-02-22T07:28:09.000Z
|
#!/usr/bin/env python
'''
Tests for gtfutils / junctions
'''
import os
import unittest
import StringIO
import ngsutils.gtf.junctions
from ngsutils.gtf import GTF
# >test1
# 1 2 3 4 5 6 7 8 9 100
# 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
# aaaaaaaaaCCCCCCCATGCtttttttttGCGCTTTGATCcccccccccCTGAGGGGGGGGGGGGGATCGgggggggggACTgggggggTCGAGGGGGGG
# exons:
# 10,20
# 30,40
# 50,70
# 90,100
# opt: 80-82
# Path to the test FASTA file whose layout is diagrammed in the comments above.
fa = os.path.join(os.path.dirname(__file__), 'test-junc.fa')


class GTFJunctionsTest(unittest.TestCase):
    # NOTE(review): this module is Python 2 only — it imports the `StringIO`
    # module, which does not exist in Python 3.

    def testJunctionsSimple(self):
        # One transcript, four exons: expect a junction sequence for every
        # downstream exon pair, taking fragment_size (4) bases on each side
        # of the junction (hence the 8-base sequences below).
        gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
        valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:16-20,49-53
ATGCCTGA
>test1:16-20,89-93
ATGCTCGA
>test1:36-40,49-53
GATCCTGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,89-93
ATCGTCGA
'''
        out = StringIO.StringIO('')
        ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
        self.assertEqual(out.getvalue(), valid)

    def testJunctionsMultiExon(self):
        # Includes the tiny 80-82 exon (3 bp, smaller than fragment_size):
        # junctions through it pull the remaining bases from the next exon,
        # producing the three-part 11-base sequences below.
        gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|80|82|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
        valid = '''\
>test1:36-40,49-53
GATCCTGA
>test1:36-40,79-82,89-93
GATCACTTCGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,79-82,89-93
ATCGACTTCGA
>test1:66-70,89-93
ATCGTCGA
'''
        out = StringIO.StringIO('')
        ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
        self.assertEqual(out.getvalue(), valid)

    def testJunctionsIsoforms(self):
        # Two transcripts of the same isoform: junction output is the union
        # over both transcripts, with duplicates emitted only once.
        gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
        valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:16-20,49-53
ATGCCTGA
>test1:16-20,89-93
ATGCTCGA
>test1:36-40,49-53
GATCCTGA
>test1:36-40,89-93
GATCTCGA
>test1:66-70,89-93
ATCGTCGA
'''
        out = StringIO.StringIO('')
        ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, out=out, quiet=True)
        self.assertEqual(out.getvalue(), valid)

    def testJunctionsIsoformsKnown(self):
        # Same input as testJunctionsIsoforms, but with known=True only the
        # junctions between adjacent exons of an annotated transcript are
        # emitted (no novel exon-skipping combinations).
        gtf = GTF(fileobj=StringIO.StringIO('''\
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|30|40|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar1"; isoform_id "iso1"
test1|test|exon|10|20|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|50|70|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
test1|test|exon|90|100|0|+|.|gene_id "foo1"; transcript_id "bar2"; isoform_id "iso1"
'''.replace('|', '\t')), quiet=True)
        valid = '''\
>test1:16-20,29-33
ATGCGCGC
>test1:36-40,89-93
GATCTCGA
>test1:16-20,49-53
ATGCCTGA
>test1:66-70,89-93
ATCGTCGA
'''
        out = StringIO.StringIO('')
        ngsutils.gtf.junctions.gtf_junctions(gtf, fa, fragment_size=4, min_size=8, known=True, out=out, quiet=True)
        self.assertEqual(out.getvalue(), valid)


if __name__ == '__main__':
    unittest.main()
| 31.817518
| 115
| 0.682725
| 652
| 4,359
| 4.435583
| 0.15184
| 0.062241
| 0.089903
| 0.076072
| 0.788382
| 0.788382
| 0.788382
| 0.775588
| 0.775588
| 0.729253
| 0
| 0.139553
| 0.136958
| 4,359
| 136
| 116
| 32.051471
| 0.629187
| 0.093141
| 0
| 0.815534
| 0
| 0.194175
| 0.594663
| 0.191614
| 0
| 0
| 0
| 0
| 0.038835
| 1
| 0.038835
| false
| 0
| 0.048544
| 0
| 0.097087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
82a2f6300b87eb5407f5a32ba6e167f583497783
| 319
|
py
|
Python
|
src/python/WMCore/WMBS/Oracle/Workflow/CheckInjectedWorkflow.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/WMBS/Oracle/Workflow/CheckInjectedWorkflow.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/WMBS/Oracle/Workflow/CheckInjectedWorkflow.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_CheckInjectedWorkflow_
Oracle implementation of Workflow.CheckInjectedWorkflow
"""
from WMCore.WMBS.MySQL.Workflow.CheckInjectedWorkflow import CheckInjectedWorkflow as MySQLCheckInjectedWorkflow
class CheckInjectedWorkflow(MySQLCheckInjectedWorkflow):
    """
    Oracle version.

    Inherits the MySQL implementation unchanged — presumably the SQL it
    carries is Oracle-compatible; this subclass exists so the Oracle DAO
    lookup resolves to a class in this package.
    """
| 21.266667
| 112
| 0.805643
| 25
| 319
| 10.2
| 0.72
| 0.227451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115987
| 319
| 14
| 113
| 22.785714
| 0.904255
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
82e97b7361be0a1d6efa1f81c7f4932ef26aa802
| 353
|
py
|
Python
|
typet/__init__.py
|
contains-io/typet
|
1bbbaa57e5de636f834215b7878af652562c1e14
|
[
"MIT"
] | 17
|
2017-11-12T08:40:30.000Z
|
2021-12-15T20:30:23.000Z
|
typet/__init__.py
|
contains-io/typet
|
1bbbaa57e5de636f834215b7878af652562c1e14
|
[
"MIT"
] | 19
|
2017-11-12T21:57:10.000Z
|
2018-10-11T02:29:02.000Z
|
typet/__init__.py
|
contains-io/typet
|
1bbbaa57e5de636f834215b7878af652562c1e14
|
[
"MIT"
] | 1
|
2017-11-12T08:40:34.000Z
|
2017-11-12T08:40:34.000Z
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=wildcard-import,redefined-builtin
"""Contains all typet classes and functions."""
from __future__ import unicode_literals
from .meta import * # noqa: F401
from .objects import * # noqa: F401
from .path import * # noqa: F401
from .types import * # noqa: F401
from .validation import * # noqa: F401
| 29.416667
| 58
| 0.708215
| 46
| 353
| 5.326087
| 0.586957
| 0.204082
| 0.285714
| 0.293878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 0.172805
| 353
| 11
| 59
| 32.090909
| 0.784247
| 0.498584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d85a098b047bf2819391ee1b76f2cebfe1d518e
| 16,794
|
py
|
Python
|
tests/stream/test_v1.py
|
rune-labs/runeq-python
|
5fb487af9d55a16665a0ac8c07f761b4927ca4c3
|
[
"MIT"
] | 4
|
2020-05-18T20:52:24.000Z
|
2022-01-21T13:41:08.000Z
|
tests/stream/test_v1.py
|
rune-labs/runeq-python
|
5fb487af9d55a16665a0ac8c07f761b4927ca4c3
|
[
"MIT"
] | 1
|
2020-09-08T22:26:46.000Z
|
2020-09-08T23:23:35.000Z
|
tests/stream/test_v1.py
|
rune-labs/runeq-python
|
5fb487af9d55a16665a0ac8c07f761b4927ca4c3
|
[
"MIT"
] | null | null | null |
from unittest import mock, TestCase
from typing import List, Dict
import numpy as np
from runeq import Config, stream, errors
def mock_get_json_response(
    bodies: List[dict],
    calls: List,
    status_code=200,
    headers: List[Dict[str, str]] = None
):
    """Build a stand-in for .get_json_response() backed by canned responses.

    Args:
        bodies: JSON bodies, handed out one per call via response.json()
        calls: every invocation's (args, kwargs) tuple is appended here
        status_code: status code applied to every response
        headers: per-response header dicts (defaults to empty dicts)
    """
    if headers is None:
        headers = [{}] * len(bodies)
    position = [0]  # mutable cell shared with the closure below

    def _fake_get_json_response(*args, **kwargs):
        # Record how we were called so the test can assert on parameters.
        calls.append((args, kwargs))
        i = position[0]
        position[0] = i + 1
        response = mock.MagicMock()
        response.headers = headers[i]
        response.status_code = status_code
        response.ok = status_code < 400
        response.json.return_value = bodies[i]
        return response

    return _fake_get_json_response
def mock_get_csv_response(
    bodies: List[str],
    calls: List,
    status_code=200,
    headers: List[Dict[str, str]] = None
):
    """Build a stand-in for .get_csv_response() backed by canned responses.

    Args:
        bodies: response bodies, handed out one per call via response.text
        calls: every invocation's (args, kwargs) tuple is appended here
        status_code: status code applied to every response
        headers: per-response header dicts (defaults to empty dicts)
    """
    if headers is None:
        headers = [{}] * len(bodies)
    position = [0]  # mutable cell shared with the closure below

    def _fake_get_csv_response(*args, **kwargs):
        # Record how we were called so the test can assert on parameters.
        calls.append((args, kwargs))
        i = position[0]
        position[0] = i + 1
        response = mock.MagicMock()
        response.headers = headers[i]
        response.status_code = status_code
        response.ok = status_code < 400
        response.text = bodies[i]
        return response

    return _fake_get_csv_response
class TestStreamV1Client(TestCase):
"""
Test stream.V1Client and the associated accessors.
"""
    def setUp(self) -> None:
        """
        Initialize a client, set up basic mocking.
        """
        self.cfg = Config(
            client_key_id='abc',
            client_access_key='abc123',
        )
        self.client = stream.V1Client(self.cfg)
        # Default to the pure-Python CSV parsing path; the numpy test flips
        # USE_NUMPY back on itself. Original value is restored in tearDown.
        self.use_np_orig = stream.v1.USE_NUMPY
        stream.v1.USE_NUMPY = False
    def tearDown(self) -> None:
        """
        Tear down monkey-patching: restore the module-level USE_NUMPY flag
        that setUp overrode.
        """
        stream.v1.USE_NUMPY = self.use_np_orig
    @mock.patch('runeq.stream.v1.requests')
    def test_get_json_response(self, requests):
        """
        Test the signature of JSON requests: each accessor hits its own
        .json endpoint, and init-time kwargs are merged with call-time
        kwargs into the query params.
        """
        for test_num, case in enumerate((
            (self.client.Accel, '/v1/accel.json'),
            (self.client.BandPower, '/v1/band_power.json'),
            (self.client.Event, '/v1/event.json'),
            (self.client.HeartRate, '/v1/heartrate.json'),
            (self.client.LFP, '/v1/lfp.json'),
            (
                self.client.ProbabilitySymptom,
                '/v1/probability_symptom.json'
            ),
            (self.client.Rotation, '/v1/rotation.json'),
            (self.client.Span, '/v1/span.json'),
            (self.client.State, '/v1/state.json'),
        )):
            resource_creator, endpoint = case
            resource = resource_creator(leslie='knope')
            resource.get_json_response(ron='swanson', test_num=test_num)
            # The GET must carry the auth headers and the merged params.
            requests.get.assert_has_calls([
                mock.call(
                    self.cfg.stream_url + endpoint,
                    headers=self.cfg.auth_headers,
                    params={
                        'leslie': 'knope',
                        'ron': 'swanson',
                        'test_num': test_num,
                    }
                ),
            ])
    @mock.patch('runeq.stream.v1.requests')
    def test_get_csv_response(self, requests):
        """
        Test the signature of CSV requests: same param merging as the JSON
        case, but against the .csv endpoints and with stream=True.
        """
        for test_num, case in enumerate((
            (self.client.Accel, '/v1/accel.csv'),
            (self.client.BandPower, '/v1/band_power.csv'),
            (self.client.HeartRate, '/v1/heartrate.csv'),
            (self.client.LFP, '/v1/lfp.csv'),
            (
                self.client.ProbabilitySymptom,
                '/v1/probability_symptom.csv'
            ),
            (self.client.Rotation, '/v1/rotation.csv'),
            (self.client.State, '/v1/state.csv'),
        )):
            resource_creator, endpoint = case
            resource = resource_creator(leslie='knope')
            resource.get_csv_response(ron='swanson', test_num=test_num)
            # CSV responses are streamed, hence the extra stream=True kwarg.
            requests.get.assert_has_calls([
                mock.call(
                    self.cfg.stream_url + endpoint,
                    stream=True,
                    headers=self.cfg.auth_headers,
                    params={
                        'leslie': 'knope',
                        'ron': 'swanson',
                        'test_num': test_num,
                    }
                ),
            ])
    def test_iter_json_data_with_token(self):
        """
        Test the iterator over JSON responses, paginating with the next page
        token header.
        """
        for test_num, resource_creator in enumerate((
            self.client.Accel,
            self.client.BandPower,
            self.client.Event,
            self.client.HeartRate,
            self.client.LFP,
            self.client.ProbabilitySymptom,
            self.client.Rotation,
            self.client.Span,
            self.client.State,
        )):
            resource = resource_creator()
            # Two pages: the first response carries a next-page-token header,
            # the second does not, which ends the iteration.
            mock_responses = [
                {'success': True, 'result': [], 'next_page': 1},
                {'success': True, 'result': []}
            ]
            calls = []
            resource.get_json_response = mock_get_json_response(
                mock_responses,
                calls,
                200,
                [
                    {'X-Rune-Next-Page-Token': 'MTIzNDU2MDAwMA=='},
                    {},
                ]
            )
            iterator = resource.iter_json_data(test_num=test_num)
            self.assertEqual(len(list(iterator)), 2)
            # Check that all parameters were kept the same across calls,
            # except for "next_page_token"
            self.assertEqual(calls, [
                ((), {'test_num': test_num}),
                (
                    (),
                    {
                        'test_num': test_num,
                        'next_page_token': 'MTIzNDU2MDAwMA=='
                    }
                )
            ])
    def test_iter_json_data(self):
        """
        Test the iterator over JSON responses, following pagination with
        the page number.
        """
        results = [
            {'a': 1},
            {'b': 2}
        ]
        # The "next_page" key in the first body drives the legacy page-number
        # pagination; its absence in the second body ends the iteration.
        mock_responses = [
            {'success': True, 'result': results[0], 'next_page': 1},
            {'success': True, 'result': results[1]}
        ]
        for test_num, resource_creator in enumerate((
            self.client.Accel,
            self.client.BandPower,
            self.client.Event,
            self.client.HeartRate,
            self.client.LFP,
            self.client.ProbabilitySymptom,
            self.client.Rotation,
            self.client.State,
        )):
            resource = resource_creator()
            #
            # Successful Requests
            #
            calls = []
            resource.get_json_response = mock_get_json_response(
                mock_responses,
                calls
            )
            # Check the results
            num_results = 0
            iterator = resource.iter_json_data(test_num=test_num)
            for i, actual in enumerate(iterator):
                self.assertEqual(results[i], actual)
                num_results += 1
            self.assertEqual(num_results, 2)
            # Check that all parameters were kept the same across calls,
            # except for "page" (which must be incremented)
            self.assertEqual(calls, [
                ((), {'test_num': test_num}),
                ((), {'test_num': test_num, 'page': 1})
            ])
            #
            # Request Error
            # Iterator should check the response status for each request
            #
            err_details = {
                "message": "i am an intentional error!",
                "type": "TestError",
            }
            resource.get_json_response = mock_get_json_response(
                [{'success': False, 'error': err_details}],
                [],
                status_code=404,
            )
            # A non-OK response should surface as APIError with the status
            # code and the body's error details attached.
            with self.assertRaises(errors.APIError) as e:
                next(resource.iter_json_data())
            err = e.exception
            self.assertEqual(err.status_code, 404)
            self.assertEqual(err.details, err_details)
    def test_iter_csv_data_with_token(self):
        """
        Test the iterator over CSV responses, which follows new pagination
        (the X-Rune-Next-Page-Token response header).
        """
        # The empty third body should be fetched (its predecessor carried a
        # token) but not yielded by the iterator.
        mock_responses = [
            'good,better\nskiing,hiking\n',
            'good,better\ncupcakes,brownies\n',
            '',
        ]
        for test_num, resource_creator in enumerate((
            self.client.Accel,
            self.client.BandPower,
            self.client.HeartRate,
            self.client.LFP,
            self.client.ProbabilitySymptom,
            self.client.Rotation,
            self.client.State,
        )):
            resource = resource_creator()
            #
            # Successful Requests
            #
            calls = []
            resource.get_csv_response = mock_get_csv_response(
                mock_responses,
                calls,
                200,
                [
                    {'X-Rune-Next-Page-Token': 'MTIzNDU2MDAwMA=='},
                    {'X-Rune-Next-Page-Token': 'MTIzNDU2MDAwMA=='},
                    {},
                ],
            )
            # Check the results
            iterator = resource.iter_csv_text(test_num=test_num)
            self.assertEqual(len(list(iterator)), 2)
            # Check that all parameters were kept the same across calls,
            # except for "next_page_token" (which will normally be different
            # for each response)
            self.assertEqual(calls, [
                ((), {'test_num': test_num}),
                (
                    (),
                    {
                        'test_num': test_num,
                        'next_page_token': 'MTIzNDU2MDAwMA=='
                    }
                ),
                (
                    (),
                    {
                        'test_num': test_num,
                        'next_page_token': 'MTIzNDU2MDAwMA=='
                    }
                ),
            ])
    def test_iter_csv_data(self):
        """
        Test the iterator over CSV responses, which follows pagination
        by incrementing the "page" parameter until a body comes back empty.
        """
        mock_responses = [
            'good,better\nskiing,hiking\n',
            'good,better\ncupcakes,brownies\n',
            '',
        ]
        for test_num, resource_creator in enumerate((
            self.client.Accel,
            self.client.BandPower,
            self.client.HeartRate,
            self.client.LFP,
            self.client.ProbabilitySymptom,
            self.client.Rotation,
            self.client.State,
        )):
            resource = resource_creator()
            #
            # Successful Requests
            #
            calls = []
            resource.get_csv_response = mock_get_csv_response(
                mock_responses,
                calls,
            )
            # Check the results
            num_results = 0
            iterator = resource.iter_csv_text(test_num=test_num)
            for i, actual in enumerate(iterator):
                self.assertEqual(mock_responses[i], actual)
                num_results += 1
            # although there are 3 responses, the last (empty) body should not
            # be returned by the iterator
            self.assertEqual(num_results, 2)
            # Check that all parameters were kept the same across calls,
            # except for "page" (which must be incremented)
            self.assertEqual(calls, [
                ((), {'test_num': test_num}),
                ((), {'test_num': test_num, 'page': 1}),
                ((), {'test_num': test_num, 'page': 2}),
            ])
            #
            # Request Error
            # Iterator should check the response status for each request
            #
            err_details = {
                "message": "i am an intentional error!",
                "type": "TestError",
            }
            # note: CSV endpoints return JSON on API errors
            resource.get_csv_response = mock_get_json_response(
                [{'success': False, 'error': err_details}],
                [],
                status_code=404,
            )
            with self.assertRaises(errors.APIError) as e:
                next(resource.iter_csv_text())
            err = e.exception
            self.assertEqual(err.status_code, 404)
            self.assertEqual(err.details, err_details)
def test_iter_points(self):
"""
Test iterating over data as points. Uses the CSV endpoint.
"""
mock_responses = [
'lower,higher,label\n1,2,ints\n3.5,6.7,floats\n',
'lower,higher,label\n,8.9,missing data\n',
'',
]
expected = [
{'lower': 1, 'higher': 2, 'label': 'ints'},
{'lower': 3.5, 'higher': 6.7, 'label': 'floats'},
{'lower': None, 'higher': 8.9, 'label': 'missing data'},
]
for test_num, resource_creator in enumerate((
self.client.Accel,
self.client.BandPower,
self.client.HeartRate,
self.client.LFP,
self.client.ProbabilitySymptom,
self.client.Rotation,
self.client.State,
)):
resource = resource_creator()
# replace get_csv_response on the resource
resource.get_csv_response = mock_get_csv_response(
mock_responses,
[],
)
for i, point in enumerate(resource.points()):
self.assertDictEqual(expected[i], point)
# replace get_csv_response again, to restart the mock responses
resource.get_csv_response = mock_get_csv_response(
mock_responses,
[],
)
for i, point in enumerate(resource):
self.assertDictEqual(expected[i], point)
# check dtype for "higher", which always has a numeric
# value in the test data
self.assertNotIsInstance(point['higher'], np.float64)
def test_iter_points_numpy(self):
"""
Test iterating over data as points, using Numpy to convert.
"""
stream.v1.USE_NUMPY = True
mock_responses = [
'lower,higher,label\n1,2,ints\n3.5,6.7,floats\n',
'lower,higher,label\n,8.9,missing data\n',
'',
]
expected = [
{'lower': 1, 'higher': 2, 'label': 'ints'},
{'lower': 3.5, 'higher': 6.7, 'label': 'floats'},
{'lower': np.NaN, 'higher': 8.9, 'label': 'missing data'},
]
for test_num, resource_creator in enumerate((
self.client.Accel,
self.client.BandPower,
self.client.HeartRate,
self.client.LFP,
self.client.ProbabilitySymptom,
self.client.Rotation,
self.client.State,
)):
resource = resource_creator()
# replace get_csv_response on the resource
resource.get_csv_response = mock_get_csv_response(
mock_responses,
[],
)
for i, point in enumerate(resource.points()):
self.assertDictEqual(expected[i], point)
# check dtype for "higher", which always has a numeric
# value in the test data
self.assertIsInstance(point['higher'], np.float64)
| 32.483559
| 78
| 0.495891
| 1,638
| 16,794
| 4.934676
| 0.147741
| 0.076704
| 0.029939
| 0.038105
| 0.841148
| 0.787331
| 0.750217
| 0.726587
| 0.710504
| 0.680317
| 0
| 0.012166
| 0.402882
| 16,794
| 516
| 79
| 32.546512
| 0.793877
| 0.150411
| 0
| 0.692958
| 0
| 0.005634
| 0.09591
| 0.032042
| 0
| 0
| 0
| 0
| 0.064789
| 1
| 0.039437
| false
| 0
| 0.011268
| 0
| 0.064789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7dad60f91a776a2483da20380918c138b1e31a04
| 5,082
|
py
|
Python
|
VScodePython/graphs.py
|
horbosis556/Scientific-Calculator-
|
98ab96fa04b4719f786fa65089700697e74f0386
|
[
"MIT"
] | 1
|
2021-01-16T10:05:43.000Z
|
2021-01-16T10:05:43.000Z
|
VScodePython/graphs.py
|
horbosis556/Scientific-Calculator-
|
98ab96fa04b4719f786fa65089700697e74f0386
|
[
"MIT"
] | null | null | null |
VScodePython/graphs.py
|
horbosis556/Scientific-Calculator-
|
98ab96fa04b4719f786fa65089700697e74f0386
|
[
"MIT"
] | null | null | null |
from tkinter import *
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
def plot_sin():
    """Open a TkInter window showing a plot of y = sin(x) over one period."""
    window = Tk()
    window.title('Graph')
    window.geometry("500x500")
    # Sample the angle range [0, 2*pi) in 0.05-radian steps.
    angles = np.arange(0, math.pi * 2, 0.05)
    values = np.sin(angles)
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    ax.plot(angles, values, 'r')
    ax.grid(True)  # grid makes the curve easier to read
    ax.set_xlabel('Angles')
    ax.set_title('Y = Sin(x)')
    # Tick positions approximate the quadrant boundaries in radians;
    # labels show the corresponding degree ranges.
    ax.set_xticks([0, 1.5, 3.1, 4.7])
    ax.set_xticklabels(['(0° To 90°)','(90° To 180°)','(180° To 270°)','(270° To 360°)'])
    ax.spines['left'].set_color('red')
    # Embed the Matplotlib figure in the Tk window via a canvas widget,
    # then attach a navigation toolbar.
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.draw()
    canvas.get_tk_widget().pack()
    toolbar = NavigationToolbar2Tk(canvas, window)
    toolbar.update()
    canvas.get_tk_widget().pack()
def plot_cos():
    """Open a TkInter window showing a plot of y = cos(x) over one period."""
    window = Tk()
    window.title('Y = Cos(x)')
    window.geometry("500x500")
    # Sample the angle range [0, 2*pi) in 0.05-radian steps.
    angles = np.arange(0, math.pi * 2, 0.05)
    values = np.cos(angles)
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    ax.plot(angles, values, 'g')
    ax.grid(True)
    ax.set_xlabel('Angles')
    ax.set_title('Y = Cos(x)')
    # Tick positions approximate the quadrant boundaries in radians.
    ax.set_xticks([0, 1.6, 3.1, 4.7])
    ax.set_xticklabels(['(0° To 90°)','(90° To 180°)','(180° To 270°)','(270° To 360°)'])
    ax.spines['left'].set_color('red')
    # Embed the figure in the Tk window and attach a toolbar.
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.draw()
    canvas.get_tk_widget().pack()
    toolbar = NavigationToolbar2Tk(canvas, window)
    toolbar.update()
    canvas.get_tk_widget().pack()
def plot_tan():
    """Open a TkInter window showing a plot of y = tan(x) over one period."""
    window = Tk()
    window.title('Y = Tan(x)')
    window.geometry("500x500")
    # Sample the angle range [0, 2*pi) in 0.05-radian steps.
    angles = np.arange(0, math.pi * 2, 0.05)
    values = np.tan(angles)
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    ax.plot(angles, values, 'm')
    ax.grid(True)
    ax.set_xlabel('Angles')
    ax.set_title('Y = Tan(x)')
    # Tick positions approximate the quadrant boundaries in radians.
    ax.set_xticks([0, 1.5, 3, 4.7])
    ax.set_xticklabels(['(0° To 90°)','(90° To 180°)','(180° To 270°)','(270° To 360°)'])
    ax.spines['left'].set_color('red')
    # Embed the figure in the Tk window and attach a toolbar.
    canvas = FigureCanvasTkAgg(fig, master=window)
    canvas.draw()
    canvas.get_tk_widget().pack()
    toolbar = NavigationToolbar2Tk(canvas, window)
    toolbar.update()
    canvas.get_tk_widget().pack()
# Unable to Use SymPy and mpmath
def plot_csc():
    """Open a TkInter window showing a plot of y = csc(x) = 1/sin(x).

    Note: x starts at 0, where csc(x) is undefined; NumPy reports a
    divide warning there and the point plots as inf, as before.
    """
    win = Tk()
    win.title('Y = Csc(x)')
    win.geometry("500x500")
    x = np.arange(0, math.pi*2, 0.05)
    fig = plt.figure()
    axe = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    # Vectorized reciprocal instead of a per-element Python loop.
    y = 1 / np.sin(x)
    axe.plot(x, y, 'c')
    axe.grid(True)
    axe.set_xlabel('Angles')
    axe.set_title('Y = Csc(x)')
    axe.set_xticks([0, 1.5, 3, 4.7])
    axe.set_xticklabels(['(0° To 90°)','(90° To 180°)','(180° To 270°)','(270° To 360°)'])
    axe.spines['left'].set_color('red')
    # Embed the figure in the Tk window and attach a toolbar.
    canvas = FigureCanvasTkAgg(fig, master = win)
    canvas.draw()
    canvas.get_tk_widget().pack()
    toolbar = NavigationToolbar2Tk(canvas, win)
    toolbar.update()
    canvas.get_tk_widget().pack()
def plot_sec():
    """Open a TkInter window showing a plot of y = sec(x) = 1/cos(x)."""
    win = Tk()
    # Bug fix: the window title previously said 'Y = Csc(x)' (copy-paste
    # from plot_csc); this window plots the secant.
    win.title('Y = Sec(x)')
    win.geometry("500x500")
    x = np.arange(0, math.pi*2, 0.05)
    fig = plt.figure()
    axe = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    # Vectorized reciprocal instead of a per-element Python loop.
    y = 1 / np.cos(x)
    axe.plot(x, y, 'y')
    axe.grid(True)
    axe.set_xlabel('Angles')
    axe.set_title('Y = Sec(x)')
    axe.set_xticks([0, 1.5, 3, 4.7])
    axe.set_xticklabels(['(0° To 90°)','(90° To 180°)','(180° To 270°)','(270° To 360°)'])
    axe.spines['left'].set_color('red')
    # Embed the figure in the Tk window and attach a toolbar.
    canvas = FigureCanvasTkAgg(fig, master = win)
    canvas.draw()
    canvas.get_tk_widget().pack()
    toolbar = NavigationToolbar2Tk(canvas, win)
    toolbar.update()
    canvas.get_tk_widget().pack()
def plot_cot():
    """Open a TkInter window showing a plot of y = cot(x) = 1/tan(x).

    Note: x starts at 0, where cot(x) is undefined; NumPy reports a
    divide warning there and the point plots as inf, as before.
    """
    win = Tk()
    # Bug fix: the window title previously said 'Y = Csc(x)' (copy-paste
    # from plot_csc); this window plots the cotangent.
    win.title('Y = Cot(x)')
    win.geometry("500x500")
    x = np.arange(0, math.pi*2, 0.05)
    fig = plt.figure()
    axe = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    # Vectorized reciprocal instead of a per-element Python loop.
    y = 1 / np.tan(x)
    axe.plot(x, y, 'b')
    axe.grid(True)
    axe.set_xlabel('Angles')
    axe.set_title('Y = Cot(x)')
    axe.set_xticks([0, 1.5, 3, 4.7])
    axe.set_xticklabels(['(0° To 90°)','(90° To 180°)','(180° To 270°)','(270° To 360°)'])
    axe.spines['left'].set_color('red')
    # Embed the figure in the Tk window and attach a toolbar.
    canvas = FigureCanvasTkAgg(fig, master = win)
    canvas.draw()
    canvas.get_tk_widget().pack()
    toolbar = NavigationToolbar2Tk(canvas, win)
    toolbar.update()
    canvas.get_tk_widget().pack()
| 29.206897
| 103
| 0.571232
| 837
| 5,082
| 3.442055
| 0.145759
| 0.052065
| 0.049636
| 0.070809
| 0.775425
| 0.755293
| 0.737591
| 0.732385
| 0.721972
| 0.721972
| 0
| 0.075213
| 0.238686
| 5,082
| 173
| 104
| 29.375723
| 0.657017
| 0.086777
| 0
| 0.734848
| 0
| 0
| 0.124381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7dc1514eec4a445105b35cbc0e5ffb0113daffdf
| 1,135
|
py
|
Python
|
tests/test_mqtt.py
|
sourcesimian/mqtt-kube
|
a199a27fd377b9a742f123d50b45d12f6cb58184
|
[
"MIT"
] | null | null | null |
tests/test_mqtt.py
|
sourcesimian/mqtt-kube
|
a199a27fd377b9a742f123d50b45d12f6cb58184
|
[
"MIT"
] | null | null | null |
tests/test_mqtt.py
|
sourcesimian/mqtt-kube
|
a199a27fd377b9a742f123d50b45d12f6cb58184
|
[
"MIT"
] | null | null | null |
from mqtt_kube.mqtt import TopicMatcher
class TestTopicMatcher:
    """Behavioural tests for TopicMatcher's MQTT-style topic filters."""

    @staticmethod
    def _check(pattern, topic, expected):
        # One assertion per (pattern, topic) pair, comparing against the
        # expected boolean exactly as the original tests did.
        assert TopicMatcher(pattern).match(topic) == expected

    def test_basic(self):
        # Exact filters match only their own topic.
        self._check('topic/one', 'topic/one', True)
        self._check('topic/two', 'topic/one', False)

    def test_plus(self):
        # '+' stands for exactly one whole topic level.
        for pattern, topic, expected in [
            ('topic/+/plus', 'topic/one/plus', True),
            ('topic/+/plus', 'topic/one/extra/plus', False),
            ('++/plus', 'topic/one/plus', False),
            ('+one/plus', 'topic/one/plus', False),
            ('+ne/plus', 'one/plus', False),
        ]:
            self._check(pattern, topic, expected)

    def test_hash(self):
        # '#' matches any remaining levels, but only as a trailing wildcard.
        for pattern, topic, expected in [
            ('#', 'topic/one/plus', True),
            ('topic/#', 'topic/one/plus', True),
            ('topic/two/#', 'topic/one/plus', False),
            ('#/plus', 'one/plus', False),
        ]:
            self._check(pattern, topic, expected)

    def test_plus_and_hash(self):
        # '+' and '#' may be combined in a single filter.
        self._check('+/+/plus/#', 'topic/one/plus/many/more', True)
        self._check('+/+/minus/#', 'topic/one/plus/many/more', False)
| 45.4
| 85
| 0.638767
| 137
| 1,135
| 5.240876
| 0.189781
| 0.325905
| 0.199164
| 0.189415
| 0.703343
| 0.703343
| 0.644847
| 0.388579
| 0
| 0
| 0
| 0
| 0.168282
| 1,135
| 24
| 86
| 47.291667
| 0.760593
| 0
| 0
| 0
| 0
| 0
| 0.262555
| 0.042291
| 0
| 0
| 0
| 0
| 0.684211
| 1
| 0.210526
| false
| 0
| 0.052632
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7dc8dab4e7aa21ce5e8156ec657c478f099bfac1
| 32
|
py
|
Python
|
stubbs/defs/item.py
|
holy-crust/reclaimer
|
0aa693da3866ce7999c68d5f71f31a9c932cdb2c
|
[
"MIT"
] | null | null | null |
stubbs/defs/item.py
|
holy-crust/reclaimer
|
0aa693da3866ce7999c68d5f71f31a9c932cdb2c
|
[
"MIT"
] | null | null | null |
stubbs/defs/item.py
|
holy-crust/reclaimer
|
0aa693da3866ce7999c68d5f71f31a9c932cdb2c
|
[
"MIT"
] | null | null | null |
from ...hek.defs.item import *
| 16
| 31
| 0.65625
| 5
| 32
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 1
| 32
| 32
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7dcb115d63475624e95679351b87856244a03ed5
| 1,306
|
py
|
Python
|
build.py
|
thommyho/conan-oatpp
|
30084af0252e9b30486ed022f4aa5357aebafa16
|
[
"Apache-2.0"
] | null | null | null |
build.py
|
thommyho/conan-oatpp
|
30084af0252e9b30486ed022f4aa5357aebafa16
|
[
"Apache-2.0"
] | null | null | null |
build.py
|
thommyho/conan-oatpp
|
30084af0252e9b30486ed022f4aa5357aebafa16
|
[
"Apache-2.0"
] | null | null | null |
from cpt.packager import ConanMultiPackager
import platform
if __name__ == "__main__":
    # Build matrix: one x86_64 configuration per build type. On Windows the
    # compiler is pinned to Visual Studio 16 with the MSVC runtime matched
    # to the build type (MDd for Debug, MD otherwise); on other platforms
    # the default compiler settings are used. Looping over the build types
    # replaces six near-identical builder.add(...) calls.
    builder = ConanMultiPackager()
    on_windows = platform.system() == "Windows"
    for build_type in ("Debug", "Release", "RelWithDebInfo"):
        settings = {"arch": "x86_64", "build_type": build_type}
        if on_windows:
            settings["compiler"] = "Visual Studio"
            settings["compiler.version"] = 16
            settings["compiler.runtime"] = "MDd" if build_type == "Debug" else "MD"
        builder.add(settings=settings, options={}, env_vars={}, build_requires={})
    builder.run()
| 62.190476
| 159
| 0.574273
| 130
| 1,306
| 5.523077
| 0.276923
| 0.083565
| 0.150418
| 0.183844
| 0.831476
| 0.831476
| 0.784123
| 0.784123
| 0.711699
| 0.597493
| 0
| 0.03003
| 0.235069
| 1,306
| 20
| 160
| 65.3
| 0.688689
| 0
| 0
| 0.315789
| 0
| 0
| 0.270291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
815548806e275795fcda4717d0fe69721e0b8d55
| 27
|
py
|
Python
|
PythonMisc/__init__.py
|
jwbrooks0/johnspythonlibrary2
|
10ca519276d8c32da0fbd41a597f75c0c98a8736
|
[
"MIT"
] | null | null | null |
PythonMisc/__init__.py
|
jwbrooks0/johnspythonlibrary2
|
10ca519276d8c32da0fbd41a597f75c0c98a8736
|
[
"MIT"
] | null | null | null |
PythonMisc/__init__.py
|
jwbrooks0/johnspythonlibrary2
|
10ca519276d8c32da0fbd41a597f75c0c98a8736
|
[
"MIT"
] | null | null | null |
from ._pythonmisc import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
817161a4114dd4bc2f2aea716e123f38982a1bb0
| 14,140
|
py
|
Python
|
registry/application/handlers/service_handlers.py
|
anandrgitnirman/snet-marketplace-service
|
f31bf741094476b9cb26277f1165deb2856257b1
|
[
"MIT"
] | 14
|
2019-02-12T09:14:52.000Z
|
2021-03-11T18:42:22.000Z
|
registry/application/handlers/service_handlers.py
|
prashantramangupta/snet-marketplace-service
|
7c293054e4b0207deefecc46defd743c064472a4
|
[
"MIT"
] | 1,079
|
2019-01-10T04:31:24.000Z
|
2022-03-29T06:16:42.000Z
|
registry/application/handlers/service_handlers.py
|
prashantramangupta/snet-marketplace-service
|
7c293054e4b0207deefecc46defd743c064472a4
|
[
"MIT"
] | 20
|
2018-12-18T13:06:41.000Z
|
2021-09-17T11:13:01.000Z
|
import sys
sys.path.append('/opt')
import json
from common.constant import StatusCode
from common.exception_handler import exception_handler
from common.exceptions import BadRequestException
from common.logger import get_logger
from common.utils import generate_lambda_response, validate_dict
from registry.application.access_control.authorization import secured
from registry.application.services.service_publisher_service import ServicePublisherService
from registry.application.services.service_transaction_status import ServiceTransactionStatus
from registry.config import NETWORK_ID, SLACK_HOOK
from registry.constants import Action, EnvironmentType
from registry.exceptions import EnvironmentNotFoundException, EXCEPTIONS
from registry.application.services.update_service_assets import UpdateServiceAssets
# Module-level logger shared by every handler in this module.
logger = get_logger(__name__)
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def verify_service_id(event, context):
    """Report whether a service_id is available within an organization.

    Requires "org_uuid" in the path parameters and "service_id" in the
    query string; raises BadRequestException when either is missing.
    """
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    query_parameters = event["queryStringParameters"]
    # Bug fix: the original used "and", which only rejected the request
    # when BOTH parameters were missing; a single missing parameter then
    # raised a KeyError below instead of a clean bad-request error.
    if "org_uuid" not in path_parameters or "service_id" not in query_parameters:
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_id = query_parameters["service_id"]
    response = ServicePublisherService(username, org_uuid, None).get_service_id_availability_status(service_id)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def save_transaction_hash_for_published_service(event, context):
    """Store the blockchain transaction hash for a published service.

    Requires "org_uuid" and "service_uuid" path parameters; the JSON body
    carries the transaction details.
    """
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    payload = json.loads(event["body"])
    # Bug fix: "or" instead of "and" — a request missing EITHER uuid is
    # invalid; the original only rejected when both were missing, so a
    # single missing key raised a KeyError below.
    if "org_uuid" not in path_parameters or "service_uuid" not in path_parameters:
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_uuid = path_parameters["service_uuid"]
    response = ServicePublisherService(username, org_uuid, service_uuid).save_transaction_hash_for_published_service(
        payload)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def save_service(event, context):
    """Save service details for the given org/service UUIDs from the body."""
    logger.info(f"Event for save service {event}")
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    payload = json.loads(event["body"])
    # Bug fix: "or" instead of "and" — a request missing EITHER uuid is
    # invalid; the original only rejected when both were missing, so a
    # single missing key raised a KeyError below.
    if not path_parameters.get("org_uuid", "") or not path_parameters.get("service_uuid", ""):
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_uuid = path_parameters["service_uuid"]
    response = ServicePublisherService(username, org_uuid, service_uuid).save_service(payload)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
# EXCEPTIONS=EXCEPTIONS added for consistency: every other handler in this
# module routes domain exceptions through the same mapping.
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def save_service_attributes(event, context):
    """Save service attribute changes for the given org/service UUIDs."""
    logger.info(f"Event for save service {event}")
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    payload = json.loads(event["body"])
    # Bug fix: "or" instead of "and" — a request missing EITHER uuid is
    # invalid; the original only rejected when both were missing, so a
    # single missing key raised a KeyError below.
    if not path_parameters.get("org_uuid", "") or not path_parameters.get("service_uuid", ""):
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_uuid = path_parameters["service_uuid"]
    response = ServicePublisherService(username, org_uuid, service_uuid).save_service_attributes(payload)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def create_service(event, context):
    """Create a new service under the organization given by org_uuid."""
    claims = event["requestContext"]["authorizer"]["claims"]
    username = claims["email"]
    path_parameters = event["pathParameters"]
    payload = json.loads(event["body"])
    # A missing or empty org_uuid is a client error.
    org_uuid = path_parameters.get("org_uuid", "")
    if not org_uuid:
        raise BadRequestException()
    result = ServicePublisherService(username, org_uuid, None).create_service(payload)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": result, "error": {}},
        cors_enabled=True,
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def get_services_for_organization(event, context):
    """List an organization's services; the JSON body is forwarded as-is."""
    claims = event["requestContext"]["authorizer"]["claims"]
    username = claims["email"]
    path_parameters = event["pathParameters"]
    payload = json.loads(event["body"])
    if "org_uuid" not in path_parameters:
        raise BadRequestException()
    result = ServicePublisherService(
        username, path_parameters["org_uuid"], None
    ).get_services_for_organization(payload)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": result, "error": {}},
        cors_enabled=True,
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def get_service_for_service_uuid(event, context):
    """Fetch a single service identified by org_uuid and service_uuid."""
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    # Bug fix: "or" instead of "and" — a request missing EITHER uuid is
    # invalid; the original only rejected when both were missing, so a
    # single missing key raised a KeyError below.
    if "org_uuid" not in path_parameters or "service_uuid" not in path_parameters:
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_uuid = path_parameters["service_uuid"]
    response = ServicePublisherService(username, org_uuid, service_uuid).get_service_for_given_service_uuid()
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def publish_service_metadata_to_ipfs(event, context):
    """Push the service's metadata to IPFS for the given org/service UUIDs."""
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    # Bug fix: "or" instead of "and" — a request missing EITHER uuid is
    # invalid; the original only rejected when both were missing, so a
    # single missing key raised a KeyError below.
    if "org_uuid" not in path_parameters or "service_uuid" not in path_parameters:
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_uuid = path_parameters["service_uuid"]
    response = ServicePublisherService(username, org_uuid, service_uuid).publish_service_data_to_ipfs()
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def get_daemon_config_for_current_network(event, context):
    """Return the daemon configuration for the requested network (TEST/MAIN)."""
    logger.info(f"event for get_daemon_config_for_current_network:: {event}")
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    query_parameters = event["queryStringParameters"]
    required_keys = ["org_uuid", "service_uuid", "group_id"]
    if not validate_dict(path_parameters, required_keys) or 'network' not in query_parameters:
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_uuid = path_parameters["service_uuid"]
    # NOTE(review): group_id is read but never used below — kept for
    # byte-compatible behaviour; confirm whether it should be passed on.
    group_id = path_parameters["group_id"]
    network = query_parameters["network"]
    # Only the two known environments are accepted; anything else is an
    # unknown network.
    if network not in (EnvironmentType.TEST.value, EnvironmentType.MAIN.value):
        raise EnvironmentNotFoundException()
    response = ServicePublisherService(username, org_uuid, service_uuid).daemon_config(
        environment=network)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
def get_service_details_using_org_id_service_id(event, context):
    """Look up service details by the org_id/service_id query parameters."""
    logger.info(f"event: {event}")
    query_parameters = event["queryStringParameters"]
    if not validate_dict(query_parameters, ["org_id", "service_id"]):
        raise BadRequestException()
    result = ServicePublisherService.get_service_for_org_id_and_service_id(
        query_parameters["org_id"], query_parameters["service_id"])
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": result, "error": {}},
        cors_enabled=True,
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
def service_deployment_status_notification_handler(event, context):
    """Forward a service build status notification to the publisher service."""
    logger.info(f"Service Build status event {event}")
    org_id, service_id = event['org_id'], event['service_id']
    build_status = int(event['build_status'])
    ServicePublisherService("BUILD_PROCESS", "", "").service_build_status_notifier(
        org_id, service_id, build_status)
    # NOTE(review): the message says "failure" regardless of build_status —
    # confirm whether that wording is intentional.
    return generate_lambda_response(
        StatusCode.CREATED,
        {"status": "success", "data": "Build failure notified", "error": {}},
        cors_enabled=True,
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
def update_transaction(event, context):
    """Refresh the status of service publish transactions and return OK."""
    logger.info(f"Update transaction event :: {event}")
    ServiceTransactionStatus().update_transaction_status()
    return generate_lambda_response(StatusCode.OK, "OK")
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
def get_code_build_status_for_service(event, context):
    """Return the demo-component build status for a service."""
    logger.info(f"Get code build status event :: {event}")
    path_parameters = event["pathParameters"]
    publisher = ServicePublisherService(
        org_uuid=path_parameters["org_uuid"],
        service_uuid=path_parameters["service_uuid"],
        username=None,
    )
    result = publisher.get_service_demo_component_build_status()
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": result, "error": {}},
        cors_enabled=True,
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
def update_service_assets(event, context):
    """Validate and process the service asset update described by the event."""
    logger.info(f"Update service assets event :: {event}")
    result = UpdateServiceAssets().validate_and_process_service_assets(payload=event)
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": result, "error": {}},
        cors_enabled=True,
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
def update_demo_component_build_status(event, context):
    """Record the build status of a service's demo component."""
    logger.info(f"Demo component build status update event :: {event}")
    result = UpdateServiceAssets().update_demo_component_build_status(
        org_uuid=event['org_uuid'],
        service_uuid=event['service_uuid'],
        build_status=event['build_status'],
        build_id=event['build_id'],
        filename=event['filename'],
    )
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": result, "error": {}},
        cors_enabled=True,
    )
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
@secured(action=Action.CREATE, org_uuid_path=("pathParameters", "org_uuid"),
         username_path=("requestContext", "authorizer", "claims", "email"))
def publish_service(event, context):
    """Publish a service's data for the given org/service UUIDs."""
    logger.info(f"Publish service event::{event}")
    username = event["requestContext"]["authorizer"]["claims"]["email"]
    path_parameters = event["pathParameters"]
    # Bug fix: "or" instead of "and" — a request missing EITHER uuid is
    # invalid; the original only rejected when both were missing, so a
    # single missing key raised a KeyError below.
    if "org_uuid" not in path_parameters or "service_uuid" not in path_parameters:
        raise BadRequestException()
    org_uuid = path_parameters["org_uuid"]
    service_uuid = path_parameters["service_uuid"]
    response = ServicePublisherService(username, org_uuid, service_uuid).publish_service_data()
    return generate_lambda_response(
        StatusCode.OK,
        {"status": "success", "data": response, "error": {}}, cors_enabled=True
    )
| 49.788732
| 117
| 0.736846
| 1,606
| 14,140
| 6.191158
| 0.07472
| 0.048577
| 0.033189
| 0.070401
| 0.808609
| 0.787589
| 0.760032
| 0.737604
| 0.710852
| 0.707231
| 0
| 0
| 0.141938
| 14,140
| 284
| 118
| 49.788732
| 0.819501
| 0
| 0
| 0.596
| 0
| 0
| 0.169578
| 0.007213
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064
| false
| 0
| 0.056
| 0
| 0.184
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
819e70addddb3290dffdeb3c906eab23eb0bcb75
| 38
|
py
|
Python
|
onelinerizer/__init__.py
|
mayl8822/onelinerizer
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
[
"MIT"
] | 1,062
|
2015-11-18T01:04:33.000Z
|
2022-03-29T07:13:30.000Z
|
onelinerizer/__init__.py
|
mayl8822/onelinerizer
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
[
"MIT"
] | 26
|
2015-11-17T06:58:07.000Z
|
2022-01-15T18:11:16.000Z
|
onelinerizer/__init__.py
|
mayl8822/onelinerizer
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
[
"MIT"
] | 100
|
2015-11-17T09:01:22.000Z
|
2021-09-12T13:58:28.000Z
|
from .onelinerizer import onelinerize
| 19
| 37
| 0.868421
| 4
| 38
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
819eee92e1b01ffcddcab758721e62152875f417
| 6,020
|
py
|
Python
|
test/pytest/test_regexes.py
|
showipintbri/ttp
|
10b8767e67ec39ed4e30769d36e6fb6e5b0ed265
|
[
"MIT"
] | 254
|
2019-09-23T15:37:13.000Z
|
2022-03-24T18:56:56.000Z
|
test/pytest/test_regexes.py
|
showipintbri/ttp
|
10b8767e67ec39ed4e30769d36e6fb6e5b0ed265
|
[
"MIT"
] | 71
|
2019-09-26T16:32:55.000Z
|
2022-03-31T15:57:12.000Z
|
test/pytest/test_regexes.py
|
showipintbri/ttp
|
10b8767e67ec39ed4e30769d36e6fb6e5b0ed265
|
[
"MIT"
] | 38
|
2019-10-18T03:43:42.000Z
|
2022-01-19T20:03:33.000Z
|
import sys
sys.path.insert(0, "../..")
import pprint
import logging
logging.basicConfig(level="INFO")
from ttp import ttp
def test_pipe_separated_regexes():
    """A regex defined in <vars> may contain '|' alternation and be
    referenced by name via re("INTF_RE") on a match variable."""
    template = """
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.1 98 0950.5785.5cd1 ARPA FastEthernet2.13
Internet 10.12.13.2 98 0950.5785.5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.4 198 0950.5C8A.5c41 ARPA GigabitEthernet2.17
</input>
<vars>
INTF_RE = r"GigabitEthernet\\S+|Fast\\S+"
</vars>
<group name="arp_test">
Internet {{ ip | re("IP")}} {{ age | re(r"\\d+") }} {{ mac }} ARPA {{ interface | re("INTF_RE") }}
</group>
"""
    parser = ttp(template=template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # The Loopback0 row (10.12.13.2) must be absent: its interface matches
    # neither alternative of INTF_RE.
    assert res == [
        [
            {
                "arp_test": [
                    {
                        "age": "98",
                        "interface": "FastEthernet2.13",
                        "ip": "10.12.13.1",
                        "mac": "0950.5785.5cd1",
                    },
                    {
                        "age": "131",
                        "interface": "GigabitEthernet2.13",
                        "ip": "10.12.13.3",
                        "mac": "0150.7685.14d5",
                    },
                    {
                        "age": "198",
                        "interface": "GigabitEthernet2.17",
                        "ip": "10.12.13.4",
                        "mac": "0950.5C8A.5c41",
                    },
                ]
            }
        ]
    ]
# test_pipe_separated_regexes()
def test_multiple_inline_regexes():
    """Several chained inline re(...) filters on one match variable act as
    alternatives, equivalent to a single pipe-separated pattern."""
    template = """
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.1 98 0950.5785.5cd1 ARPA FastEthernet2.13
Internet 10.12.13.2 98 0950.5785.5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.4 198 0950.5C8A.5c41 ARPA GigabitEthernet2.17
</input>
<vars>
INTF_RE = r"GigabitEthernet\\S+|Fast\\S+"
</vars>
<group name="arp_test">
Internet {{ ip }} {{ age }} {{ mac }} ARPA {{ interface | re(r"GigabitEthernet\\S+") | re(r"Fast\\S+") }}
</group>
"""
    parser = ttp(template=template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # The Loopback0 row (10.12.13.2) must be absent: its interface matches
    # neither of the inline regexes.
    assert res == [
        [
            {
                "arp_test": [
                    {
                        "age": "98",
                        "interface": "FastEthernet2.13",
                        "ip": "10.12.13.1",
                        "mac": "0950.5785.5cd1",
                    },
                    {
                        "age": "131",
                        "interface": "GigabitEthernet2.13",
                        "ip": "10.12.13.3",
                        "mac": "0150.7685.14d5",
                    },
                    {
                        "age": "198",
                        "interface": "GigabitEthernet2.17",
                        "ip": "10.12.13.4",
                        "mac": "0950.5C8A.5c41",
                    },
                ]
            }
        ]
    ]
# test_multiple_inline_regexes()
def test_MAC_regex_formatter():
    """The MAC match formatter accepts the common separator styles
    (':', '.', '-', in 2- or 4-digit groups) and rejects malformed
    addresses (the will_not_match rows below)."""
    template = """
<input load="text">
Protocol Address Age (min) Hardware Addr Type Interface
Internet 10.12.13.2 98 0950:5785:5cd2 ARPA Loopback0
Internet 10.12.13.3 131 0150.7685.14d5 ARPA GigabitEthernet2.13
Internet 10.12.13.1 98 0950-5785-5cd1 ARPA FastEthernet2.13
Internet 10.12.13.4 198 09:50:5C:8A:5c:41 ARPA GigabitEthernet2.17
Internet 10.12.13.5 198 09.50.5C.8A.5c.41 ARPA GigabitEthernet2.17
Internet 10.12.13.6 198 09-50-5C-8A-5c-41 ARPA GigabitEthernet2.17
Internet 10.12.13.6 198 09505C8A5c41 ARPA GigabitEthernet2.will_not_match
Internet 10.12.13.6 198 09505C8:A5c41 ARPA GigabitEthernet2.will_not_match
Internet 10.12.13.6 198 09505C.8.A5c41 ARPA GigabitEthernet2.will_not_match
</input>
<group name="arp_test">
Internet {{ ip }} {{ age }} {{ mac | MAC }} ARPA {{ interface }}
</group>
"""
    parser = ttp(template=template)
    parser.parse()
    res = parser.result()
    # pprint.pprint(res)
    # Only the six well-formed MAC rows appear; the three malformed ones
    # are dropped.
    assert res == [
        [
            {
                "arp_test": [
                    {
                        "age": "98",
                        "interface": "Loopback0",
                        "ip": "10.12.13.2",
                        "mac": "0950:5785:5cd2",
                    },
                    {
                        "age": "131",
                        "interface": "GigabitEthernet2.13",
                        "ip": "10.12.13.3",
                        "mac": "0150.7685.14d5",
                    },
                    {
                        "age": "98",
                        "interface": "FastEthernet2.13",
                        "ip": "10.12.13.1",
                        "mac": "0950-5785-5cd1",
                    },
                    {
                        "age": "198",
                        "interface": "GigabitEthernet2.17",
                        "ip": "10.12.13.4",
                        "mac": "09:50:5C:8A:5c:41",
                    },
                    {
                        "age": "198",
                        "interface": "GigabitEthernet2.17",
                        "ip": "10.12.13.5",
                        "mac": "09.50.5C.8A.5c.41",
                    },
                    {
                        "age": "198",
                        "interface": "GigabitEthernet2.17",
                        "ip": "10.12.13.6",
                        "mac": "09-50-5C-8A-5c-41",
                    },
                ]
            }
        ]
    ]
# test_MAC_regex_formatter()
| 31.851852
| 112
| 0.421429
| 589
| 6,020
| 4.251273
| 0.154499
| 0.046326
| 0.069489
| 0.095048
| 0.84984
| 0.84984
| 0.833067
| 0.826677
| 0.811502
| 0.811502
| 0
| 0.189922
| 0.442857
| 6,020
| 188
| 113
| 32.021277
| 0.556649
| 0.02392
| 0
| 0.559006
| 0
| 0.031056
| 0.492759
| 0.03084
| 0
| 0
| 0
| 0
| 0.018634
| 1
| 0.018634
| false
| 0
| 0.024845
| 0
| 0.043478
| 0.006211
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
81a712fc0a1ca3fd1d60c1d20884115c33cc34d5
| 77
|
py
|
Python
|
tst/test_fireFoxBrowser.py
|
Xiaoyu-Xing/youtube-algorithmic-bias
|
2643f9ba59fa5ad1757bc645a5a6bcf45061e21b
|
[
"MIT"
] | 2
|
2019-02-20T16:35:00.000Z
|
2019-02-22T02:29:34.000Z
|
tst/test_fireFoxBrowser.py
|
Xiaoyu-Xing/youtube-algorithmic-bias
|
2643f9ba59fa5ad1757bc645a5a6bcf45061e21b
|
[
"MIT"
] | null | null | null |
tst/test_fireFoxBrowser.py
|
Xiaoyu-Xing/youtube-algorithmic-bias
|
2643f9ba59fa5ad1757bc645a5a6bcf45061e21b
|
[
"MIT"
] | 2
|
2020-06-10T04:42:05.000Z
|
2021-04-30T01:09:56.000Z
|
from unittest import TestCase
class TestFireFoxBrowser(TestCase):
pass
| 12.833333
| 35
| 0.792208
| 8
| 77
| 7.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168831
| 77
| 5
| 36
| 15.4
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
81ddf4efd987baa173741271f80cf21b6476eb8a
| 231
|
py
|
Python
|
src/config/utils/custom_pagination.py
|
amirpsd/drf_blog_api
|
58be081a450840114af021e7412e469fad90456d
|
[
"MIT"
] | 33
|
2022-02-11T12:16:29.000Z
|
2022-03-26T15:08:47.000Z
|
src/config/utils/custom_pagination.py
|
amirpsd/django_blog_api
|
58be081a450840114af021e7412e469fad90456d
|
[
"MIT"
] | null | null | null |
src/config/utils/custom_pagination.py
|
amirpsd/django_blog_api
|
58be081a450840114af021e7412e469fad90456d
|
[
"MIT"
] | 5
|
2022-02-11T13:03:52.000Z
|
2022-03-28T16:04:32.000Z
|
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class CustomPagination(PageNumberPagination):
def get_paginated_response(self, data):
return Response(data)
| 25.666667
| 58
| 0.805195
| 24
| 231
| 7.583333
| 0.625
| 0.087912
| 0.186813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147186
| 231
| 9
| 59
| 25.666667
| 0.923858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c4eca49ca3e8a8cdb08d085684aeb077bcb0da95
| 110
|
py
|
Python
|
ch01/helloworld/src/pages/views.py
|
Redhat8983/Redhat8983-learning-django2
|
fc0d5a9d93a7b15f1235d2c172e4bba50b884018
|
[
"Unlicense"
] | 1
|
2022-02-21T06:48:26.000Z
|
2022-02-21T06:48:26.000Z
|
ch01/helloworld/src/pages/views.py
|
Redhat8983/learning-django2
|
fc0d5a9d93a7b15f1235d2c172e4bba50b884018
|
[
"Unlicense"
] | null | null | null |
ch01/helloworld/src/pages/views.py
|
Redhat8983/learning-django2
|
fc0d5a9d93a7b15f1235d2c172e4bba50b884018
|
[
"Unlicense"
] | null | null | null |
from django.http import HttpResponse
def home_page_view(request):
return HttpResponse('Hello, Worlds!!')
| 22
| 42
| 0.772727
| 14
| 110
| 5.928571
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 110
| 4
| 43
| 27.5
| 0.864583
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
1eeade2b2eff7c021bdf24444d77665b69e99e87
| 116
|
py
|
Python
|
core/message_handler/message_sender_from_db.py
|
taimaskhanov11/AsyncVkAccount
|
58c48886545aa581eb28a1071d52e0a60aa1b8ea
|
[
"MIT"
] | 1
|
2021-12-26T20:40:39.000Z
|
2021-12-26T20:40:39.000Z
|
core/message_handler/message_sender_from_db.py
|
taimaskhanov11/AsyncVkAccount
|
58c48886545aa581eb28a1071d52e0a60aa1b8ea
|
[
"MIT"
] | 1
|
2021-12-03T18:38:38.000Z
|
2021-12-03T18:39:08.000Z
|
core/message_handler/message_sender_from_db.py
|
taimaskhanov11/AsyncVkAccount
|
58c48886545aa581eb28a1071d52e0a60aa1b8ea
|
[
"MIT"
] | null | null | null |
from core.message_handler.message_sender import MessageSender
class MessageSenderFromDb(MessageSender):
pass
| 16.571429
| 61
| 0.836207
| 12
| 116
| 7.916667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 116
| 6
| 62
| 19.333333
| 0.931373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
6f8fb06b522d444ecf71afd635cd35139e725c57
| 558
|
py
|
Python
|
lib/python/pyflyby/importdb.py
|
azjps/pyflyby
|
fd837e9686e56a7f88eefcf1d28313915affbd3e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/python/pyflyby/importdb.py
|
azjps/pyflyby
|
fd837e9686e56a7f88eefcf1d28313915affbd3e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/python/pyflyby/importdb.py
|
azjps/pyflyby
|
fd837e9686e56a7f88eefcf1d28313915affbd3e
|
[
"BSD-3-Clause"
] | null | null | null |
# pyflyby/importdb.py.
# Copyright (C) 2011, 2012, 2013, 2014 Karl Chen.
# License: MIT http://opensource.org/licenses/MIT
# Deprecated stub for backwards compatibility.
from __future__ import absolute_import, division, with_statement
from pyflyby._importdb import ImportDB
def global_known_imports():
# Deprecated stub for backwards compatibility.
return ImportDB.get_default(".").known_imports
def global_mandatory_imports():
# Deprecated stub for backwards compatibility.
return ImportDB.get_default(".").mandatory_imports
| 27.9
| 64
| 0.765233
| 66
| 558
| 6.242424
| 0.560606
| 0.101942
| 0.123786
| 0.18932
| 0.434466
| 0.339806
| 0.339806
| 0.339806
| 0.339806
| 0.339806
| 0
| 0.033684
| 0.148746
| 558
| 19
| 65
| 29.368421
| 0.833684
| 0.449821
| 0
| 0
| 0
| 0
| 0.006667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 1
| 0.333333
| 1.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6fa864c45ec99157d37fc19a4b427bdda37a627d
| 21
|
py
|
Python
|
yolo/vedanet/network/head/brick/__init__.py
|
hilman-dayo/ObjectDetection-OneStageDet
|
44054ad335e24e99a98fdad0d18b9bf3a80c941c
|
[
"MIT"
] | 331
|
2020-06-05T05:10:21.000Z
|
2022-03-29T07:32:42.000Z
|
vedanet/network/head/brick/__init__.py
|
xiongcaihua/ObjectDetection-OneStageDet
|
d29f69cdce32b006bd040edb6e66427b3c987c70
|
[
"Apache-2.0"
] | 10
|
2020-06-12T07:53:42.000Z
|
2021-05-11T00:09:10.000Z
|
vedanet/network/head/brick/__init__.py
|
xiongcaihua/ObjectDetection-OneStageDet
|
d29f69cdce32b006bd040edb6e66427b3c987c70
|
[
"Apache-2.0"
] | 84
|
2020-06-05T10:21:11.000Z
|
2022-03-27T23:42:44.000Z
|
from . import yolov3
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6fa958a127643df28e086c6eed2e50b037d9b74b
| 812
|
py
|
Python
|
sorceress/test___init__.py
|
altunenes/sorceress
|
1ee36bbd27ebe3ae41293e3df44c7d3dd150502c
|
[
"MIT"
] | 6
|
2021-09-20T14:47:34.000Z
|
2022-03-09T12:35:48.000Z
|
sorceress/test___init__.py
|
altunenes/sorceress
|
1ee36bbd27ebe3ae41293e3df44c7d3dd150502c
|
[
"MIT"
] | null | null | null |
sorceress/test___init__.py
|
altunenes/sorceress
|
1ee36bbd27ebe3ae41293e3df44c7d3dd150502c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
class Testsorcerer(TestCase):
def test_chromatic(self):
return
def test_dotill(self):
return
def test_realtimegrid(self):
return
def test_addlines(self):
return
def test_addlines_alpha(self):
return
def test_eyecolour(self):
return
def test_dakin_pex(self):
return
def test_bruno(self):
return
def test_dolboeuf(self):
return
def test_kanizsa(self):
return
def test_ponzol(self):
return
def test_t_aki2001(self):
return
def test_cafe_wall(self):
return
def test_ccob(self):
return
def test_ebbinghaus(self):
return
def test_whiteill(self):
return
def test_enigma(self):
return
| 21.368421
| 34
| 0.610837
| 96
| 812
| 4.947917
| 0.3125
| 0.250526
| 0.437895
| 0.572632
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007273
| 0.32266
| 812
| 38
| 35
| 21.368421
| 0.856364
| 0
| 0
| 0.472222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.472222
| false
| 0
| 0.027778
| 0.472222
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
82ea330aae6d4f6b9f65440d120a2656dcb64e13
| 158
|
py
|
Python
|
Python Snippets/sorting/__init__.py
|
wolfnfox/Code-Snippets
|
993cb2b273d538bdeb76ff3a39fa41a92a6282de
|
[
"MIT"
] | null | null | null |
Python Snippets/sorting/__init__.py
|
wolfnfox/Code-Snippets
|
993cb2b273d538bdeb76ff3a39fa41a92a6282de
|
[
"MIT"
] | null | null | null |
Python Snippets/sorting/__init__.py
|
wolfnfox/Code-Snippets
|
993cb2b273d538bdeb76ff3a39fa41a92a6282de
|
[
"MIT"
] | null | null | null |
# import os, sys
# parentdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# if parentdir not in sys.path:
# sys.path.insert(0,parentdir)
| 39.5
| 74
| 0.721519
| 25
| 158
| 4.4
| 0.52
| 0.163636
| 0.236364
| 0.272727
| 0.290909
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 0.120253
| 158
| 4
| 75
| 39.5
| 0.784173
| 0.949367
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d21c785473a527861d4b0848610313159a104f91
| 232
|
py
|
Python
|
installer_scoring/installer_scoring/doctype/installer_scoring_sub_parameter_value_and_score/installer_scoring_sub_parameter_value_and_score.py
|
Manisvb123/TestAll
|
39fc4e59e1fe58e9778f57f9bbfcfd8e2555e938
|
[
"MIT"
] | null | null | null |
installer_scoring/installer_scoring/doctype/installer_scoring_sub_parameter_value_and_score/installer_scoring_sub_parameter_value_and_score.py
|
Manisvb123/TestAll
|
39fc4e59e1fe58e9778f57f9bbfcfd8e2555e938
|
[
"MIT"
] | null | null | null |
installer_scoring/installer_scoring/doctype/installer_scoring_sub_parameter_value_and_score/installer_scoring_sub_parameter_value_and_score.py
|
Manisvb123/TestAll
|
39fc4e59e1fe58e9778f57f9bbfcfd8e2555e938
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, mani.v@gmail.com and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class InstallerScoringSubParameterValueandScore(Document):
pass
| 25.777778
| 58
| 0.814655
| 28
| 232
| 6.75
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019512
| 0.116379
| 232
| 8
| 59
| 29
| 0.902439
| 0.49569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d2238e7f4be6a0a7ebca49eb61c9c09919f05f0d
| 115
|
py
|
Python
|
QuickCoders/views.py
|
hpathipati/Quick-Tutor
|
17476d79b87f51b12a6c8fc435d1a6506bff1e04
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
QuickCoders/views.py
|
hpathipati/Quick-Tutor
|
17476d79b87f51b12a6c8fc435d1a6506bff1e04
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
QuickCoders/views.py
|
hpathipati/Quick-Tutor
|
17476d79b87f51b12a6c8fc435d1a6506bff1e04
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
from django.shortcuts import redirect, render
def homepage(request):
return render(request, 'study/home.html')
| 28.75
| 45
| 0.773913
| 15
| 115
| 5.933333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121739
| 115
| 4
| 46
| 28.75
| 0.881188
| 0
| 0
| 0
| 0
| 0
| 0.12931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
968f657c4f13e635a498fb57cd2f6c0bf9f3b053
| 35
|
py
|
Python
|
src/lib/dis.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/dis.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/dis.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
import _sk_fail; _sk_fail._("dis")
| 17.5
| 34
| 0.742857
| 6
| 35
| 3.5
| 0.666667
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.65625
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
96a030ae4b62523a577060ac279fb7753433f9f5
| 28
|
py
|
Python
|
skssl/meta/__init__.py
|
YannDubs/Semi-Supervised-Neural-Processes
|
77176131923817f3a165883dd6fca7b9f1e9d0b3
|
[
"MIT"
] | 5
|
2019-06-19T11:11:56.000Z
|
2020-07-03T08:42:36.000Z
|
skssl/meta/__init__.py
|
YannDubs/Semi-Supervised-Neural-Processes
|
77176131923817f3a165883dd6fca7b9f1e9d0b3
|
[
"MIT"
] | null | null | null |
skssl/meta/__init__.py
|
YannDubs/Semi-Supervised-Neural-Processes
|
77176131923817f3a165883dd6fca7b9f1e9d0b3
|
[
"MIT"
] | null | null | null |
from .selftraining import *
| 14
| 27
| 0.785714
| 3
| 28
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
73792c24fe46fc36ce796ed077332b0f6d4f0760
| 26
|
py
|
Python
|
hogwarts/utils/__init__.py
|
PingchuanMa/hogwarts
|
404e1d524fee4f190d8de1c1e8bc0711d895089a
|
[
"MIT"
] | 4
|
2019-10-12T04:55:03.000Z
|
2019-11-25T22:30:41.000Z
|
hogwarts/utils/__init__.py
|
PingchuanMa/hogwarts
|
404e1d524fee4f190d8de1c1e8bc0711d895089a
|
[
"MIT"
] | null | null | null |
hogwarts/utils/__init__.py
|
PingchuanMa/hogwarts
|
404e1d524fee4f190d8de1c1e8bc0711d895089a
|
[
"MIT"
] | null | null | null |
from . import tensorboard
| 13
| 25
| 0.807692
| 3
| 26
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
739253a8461f536eb86edbee8c872a23faa56bb0
| 4,180
|
py
|
Python
|
applications/plot_data_paper2.py
|
RaulRPrado/tev-binaries-model
|
c60959caaffbcdf3398914b03531647f95e97da0
|
[
"Apache-2.0"
] | 1
|
2020-06-03T15:39:38.000Z
|
2020-06-03T15:39:38.000Z
|
applications/plot_data_paper2.py
|
RaulRPrado/tev-binaries-model
|
c60959caaffbcdf3398914b03531647f95e97da0
|
[
"Apache-2.0"
] | null | null | null |
applications/plot_data_paper2.py
|
RaulRPrado/tev-binaries-model
|
c60959caaffbcdf3398914b03531647f95e97da0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import matplotlib.pyplot as plt
import logging
import math
from astropy import units as u
from tgblib import util
from tgblib.data import get_data, get_data_ul
logging.getLogger().setLevel(logging.INFO)
if __name__ == '__main__':
util.set_my_fonts(mode='talk')
show = False
label = 'std'
NU_TITLE = {
0: 'Nu1a',
1: 'Nu1b',
2: 'Nu2a',
3: 'none',
4: 'Nu2b'
}
VTS_TITLE = {
0: 'Ve1a',
1: 'Ve1b',
2: 'Ve2a',
3: 'Ve2b',
4: 'Ve2c'
}
MARKERS = {
0: 'o',
1: 's',
2: 'o',
3: 's',
4: '*'
}
COLORS = {
0: 'k',
1: 'r',
2: 'k',
3: 'r',
4: 'b'
}
MINOR_TICK = 7.5
MAJOR_TICK = 12
keV_to_TeV = u.keV.to(u.TeV)
# 2017
plt.figure(figsize=(8, 6), tight_layout=True)
ax = plt.gca()
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel(r'$E^2\;\mathrm{d}N/\mathrm{d}E\;[\mathrm{erg\;s^{-1}\;cm^{-2}}]$')
ax.set_xlabel(r'$E\;[\mathrm{TeV}]$')
ax.tick_params(which='minor', length=MINOR_TICK)
ax.tick_params(which='major', length=MAJOR_TICK)
for nn, iper in enumerate([0, 1]):
vtsEnergy, vtsFlux, vtsFluxErr = get_data(iper, onlyVTS=True, GT=True)
vtsEnergyUL, vtsFluxUL = get_data_ul(iper, GT=True)
ax.errorbar(
[e * (1 + 0.02 * nn) * keV_to_TeV for e in vtsEnergy],
vtsFlux,
yerr=vtsFluxErr,
color=COLORS[iper],
linestyle='none',
label=VTS_TITLE[iper],
marker=MARKERS[iper]
)
if len(vtsEnergyUL) > 0:
vtsFluxErrUL = [p - pow(10, math.log10(p) - 0.1) for p in vtsFluxUL]
ax.errorbar(
[e * keV_to_TeV for e in vtsEnergyUL],
vtsFluxUL,
yerr=vtsFluxErrUL,
uplims=True,
color=COLORS[iper],
linestyle='none',
marker=MARKERS[iper]
)
ax.set_ylim(0.8e-13, 5e-12)
ax.set_xlim(3e-1, 2e1)
myTicks = [1e0, 1e1]
myLabels = [r'$10^{0}$', r'$10^{1}$']
ax.set_xticks(myTicks)
ax.set_xticklabels(myLabels)
ax.legend(loc='best', frameon=False)
plt.savefig(
'figures/DataVTS_2017.png',
format='png',
bbox_inches='tight'
)
plt.savefig(
'figures/DataVTS_2017.pdf',
format='pdf',
bbox_inches='tight'
)
# 2019
plt.figure(figsize=(8, 6), tight_layout=True)
ax = plt.gca()
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel(r'$E^2\;\mathrm{d}N/\mathrm{d}E\;[\mathrm{erg\;s^{-1}\;cm^{-2}}]$')
ax.set_xlabel(r'$E\;[\mathrm{TeV}]$')
ax.tick_params(which='minor', length=MINOR_TICK)
ax.tick_params(which='major', length=MAJOR_TICK)
for nn, iper in enumerate([2, 3, 4]):
vtsEnergy, vtsFlux, vtsFluxErr = get_data(iper, onlyVTS=True)
vtsEnergyUL, vtsFluxUL = get_data_ul(iper)
ax.errorbar(
[e * (1 + 0.02 * nn) * keV_to_TeV for e in vtsEnergy],
vtsFlux,
yerr=vtsFluxErr,
color=COLORS[iper],
linestyle='none',
label=VTS_TITLE[iper],
marker=MARKERS[iper]
)
if len(vtsEnergyUL) > 0:
vtsFluxErrUL = [p - pow(10, math.log10(p) - 0.1) for p in vtsFluxUL]
ax.errorbar(
[e * keV_to_TeV for e in vtsEnergyUL],
vtsFluxUL,
yerr=vtsFluxErrUL,
uplims=True,
color=COLORS[iper],
linestyle='none',
marker=MARKERS[iper]
)
ax.set_ylim(0.8e-13, 5e-12)
ax.set_xlim(3e-1, 2e1)
myTicks = [1e0, 1e1]
myLabels = [r'$10^{0}$', r'$10^{1}$']
ax.set_xticks(myTicks)
ax.set_xticklabels(myLabels)
ax.legend(loc='best', frameon=False)
plt.savefig(
'figures/DataVTS_2019.png',
format='png',
bbox_inches='tight'
)
plt.savefig(
'figures/DataVTS_2019.pdf',
format='pdf',
bbox_inches='tight'
)
| 25.180723
| 85
| 0.517464
| 536
| 4,180
| 3.906716
| 0.266791
| 0.038204
| 0.019102
| 0.032474
| 0.815664
| 0.808023
| 0.782235
| 0.748806
| 0.702961
| 0.702961
| 0
| 0.048789
| 0.32823
| 4,180
| 165
| 86
| 25.333333
| 0.696937
| 0.006459
| 0
| 0.565217
| 0
| 0.014493
| 0.107229
| 0.053494
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043478
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb4ab11cea1f2df276c4d6fd660056e068eff33a
| 14,976
|
py
|
Python
|
s3tests_boto3/functional/test_sts.py
|
TheRealGchen/s3-tests
|
67caf0f85489a50ef628aa1fd28cbea9be989bdd
|
[
"MIT"
] | null | null | null |
s3tests_boto3/functional/test_sts.py
|
TheRealGchen/s3-tests
|
67caf0f85489a50ef628aa1fd28cbea9be989bdd
|
[
"MIT"
] | null | null | null |
s3tests_boto3/functional/test_sts.py
|
TheRealGchen/s3-tests
|
67caf0f85489a50ef628aa1fd28cbea9be989bdd
|
[
"MIT"
] | 2
|
2021-06-23T16:09:24.000Z
|
2021-10-01T17:00:42.000Z
|
import boto3
import botocore.session
from botocore.exceptions import ClientError
from botocore.exceptions import ParamValidationError
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import isodate
import email.utils
import datetime
import threading
import re
import pytz
from collections import OrderedDict
import requests
import json
import base64
import hmac
import hashlib
import xml.etree.ElementTree as ET
import time
import operator
import nose
import os
import string
import random
import socket
import ssl
import logging
from collections import namedtuple
from email.header import decode_header
from . import(
get_iam_client,
get_sts_client,
get_client,
get_alt_user_id,
get_config_endpoint,
get_new_bucket_name,
get_parameter_name,
get_main_aws_access_key,
get_main_aws_secret_key,
get_thumbprint,
get_aud,
get_token,
get_realm_name,
check_webidentity
)
log = logging.getLogger(__name__)
def create_role(iam_client,path,rolename,policy_document,description,sessionduration,permissionboundary):
role_err=None
if rolename is None:
rolename=get_parameter_name()
try:
role_response = iam_client.create_role(Path=path,RoleName=rolename,AssumeRolePolicyDocument=policy_document,)
except ClientError as e:
role_err = e.response['Code']
return (role_err,role_response,rolename)
def put_role_policy(iam_client,rolename,policyname,role_policy):
role_err=None
if policyname is None:
policyname=get_parameter_name()
try:
role_response = iam_client.put_role_policy(RoleName=rolename,PolicyName=policyname,PolicyDocument=role_policy)
except ClientError as e:
role_err = e.response['Code']
return (role_err,role_response)
def put_user_policy(iam_client,username,policyname,policy_document):
role_err=None
if policyname is None:
policyname=get_parameter_name()
try:
role_response = iam_client.put_user_policy(UserName=username,PolicyName=policyname,PolicyDocument=policy_document)
except ClientError as e:
role_err = e.response['Code']
return (role_err,role_response)
@attr(resource='get session token')
@attr(method='get')
@attr(operation='check')
@attr(assertion='s3 ops only accessible by temporary credentials')
@attr('test_of_sts')
def test_get_session_token():
iam_client=get_iam_client()
sts_client=get_sts_client()
sts_user_id=get_alt_user_id()
default_endpoint=get_config_endpoint()
user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}"
(resp_err,resp)=put_user_policy(iam_client,sts_user_id,None,user_policy)
eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
response=sts_client.get_session_token()
eq(response['ResponseMetadata']['HTTPStatusCode'],200)
s3_client=boto3.client('s3',
aws_access_key_id = response['Credentials']['AccessKeyId'],
aws_secret_access_key = response['Credentials']['SecretAccessKey'],
aws_session_token = response['Credentials']['SessionToken'],
endpoint_url=default_endpoint,
region_name='',
)
bucket_name = get_new_bucket_name()
s3bucket = s3_client.create_bucket(Bucket=bucket_name)
eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
finish=s3_client.delete_bucket(Bucket=bucket_name)
@attr(resource='get session token')
@attr(method='get')
@attr(operation='check')
@attr(assertion='s3 ops denied by permanent credentials')
@attr('test_of_sts')
def test_get_session_token_permanent_creds_denied():
s3bucket_error=None
iam_client=get_iam_client()
sts_client=get_sts_client()
sts_user_id=get_alt_user_id()
default_endpoint=get_config_endpoint()
s3_main_access_key=get_main_aws_access_key()
s3_main_secret_key=get_main_aws_secret_key()
user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}"
(resp_err,resp)=put_user_policy(iam_client,sts_user_id,None,user_policy)
eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
response=sts_client.get_session_token()
eq(response['ResponseMetadata']['HTTPStatusCode'],200)
s3_client=boto3.client('s3',
aws_access_key_id = s3_main_access_key,
aws_secret_access_key = s3_main_secret_key,
aws_session_token = response['Credentials']['SessionToken'],
endpoint_url=default_endpoint,
region_name='',
)
bucket_name = get_new_bucket_name()
try:
s3bucket = s3_client.create_bucket(Bucket=bucket_name)
except ClientError as e:
s3bucket_error = e.response.get("Error", {}).get("Code")
eq(s3bucket_error,'AccessDenied')
@attr(resource='assume role')
@attr(method='get')
@attr(operation='check')
@attr(assertion='role policy allows all s3 ops')
@attr('test_of_sts')
def test_assume_role_allow():
iam_client=get_iam_client()
sts_client=get_sts_client()
sts_user_id=get_alt_user_id()
default_endpoint=get_config_endpoint()
role_session_name=get_parameter_name()
policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
(role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
(role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
eq(response['ResponseMetadata']['HTTPStatusCode'],200)
resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
s3_client = boto3.client('s3',
aws_access_key_id = resp['Credentials']['AccessKeyId'],
aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
aws_session_token = resp['Credentials']['SessionToken'],
endpoint_url=default_endpoint,
region_name='',
)
bucket_name = get_new_bucket_name()
s3bucket = s3_client.create_bucket(Bucket=bucket_name)
eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
bkt = s3_client.delete_bucket(Bucket=bucket_name)
eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
@attr(resource='assume role')
@attr(method='get')
@attr(operation='check')
@attr(assertion='role policy denies all s3 ops')
@attr('test_of_sts')
def test_assume_role_deny():
s3bucket_error=None
iam_client=get_iam_client()
sts_client=get_sts_client()
sts_user_id=get_alt_user_id()
default_endpoint=get_config_endpoint()
role_session_name=get_parameter_name()
policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
(role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
(role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
eq(response['ResponseMetadata']['HTTPStatusCode'],200)
resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
s3_client = boto3.client('s3',
aws_access_key_id = resp['Credentials']['AccessKeyId'],
aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
aws_session_token = resp['Credentials']['SessionToken'],
endpoint_url=default_endpoint,
region_name='',
)
bucket_name = get_new_bucket_name()
try:
s3bucket = s3_client.create_bucket(Bucket=bucket_name)
except ClientError as e:
s3bucket_error = e.response.get("Error", {}).get("Code")
eq(s3bucket_error,'AccessDenied')
@attr(resource='assume role')
@attr(method='get')
@attr(operation='check')
@attr(assertion='creds expire so all s3 ops fails')
@attr('test_of_sts')
def test_assume_role_creds_expiry():
iam_client=get_iam_client()
sts_client=get_sts_client()
sts_user_id=get_alt_user_id()
default_endpoint=get_config_endpoint()
role_session_name=get_parameter_name()
policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
(role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
(role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
eq(response['ResponseMetadata']['HTTPStatusCode'],200)
resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,DurationSeconds=900)
eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
time.sleep(900)
s3_client = boto3.client('s3',
aws_access_key_id = resp['Credentials']['AccessKeyId'],
aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
aws_session_token = resp['Credentials']['SessionToken'],
endpoint_url=default_endpoint,
region_name='',
)
bucket_name = get_new_bucket_name()
try:
s3bucket = s3_client.create_bucket(Bucket=bucket_name)
except ClientError as e:
s3bucket_error = e.response.get("Error", {}).get("Code")
eq(s3bucket_error,'AccessDenied')
@attr(resource='assume role with web identity')
@attr(method='get')
@attr(operation='check')
@attr(assertion='assuming role through web token')
@attr('webidentity_test')
def test_assume_role_with_web_identity():
    """Assume a role via an OIDC web-identity token and use it for s3 ops."""
    check_webidentity()
    iam_client = get_iam_client()
    sts_client = get_sts_client()
    default_endpoint = get_config_endpoint()
    role_session_name = get_parameter_name()
    thumbprint = get_thumbprint()
    aud = get_aud()
    token = get_token()
    realm = get_realm_name()

    # Register the local Keycloak realm as an OIDC identity provider.
    oidc_response = iam_client.create_open_id_connect_provider(
        Url='http://localhost:8080/auth/realms/{}'.format(realm),
        ThumbprintList=[thumbprint],
    )

    # Trust policy: federated principals from that provider, restricted to aud.
    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
    (role_error, role_response, general_role_name) = create_role(iam_client, '/', None, policy_document, None, None, None)
    eq(role_response['Role']['Arn'], 'arn:aws:iam:::role/'+general_role_name+'')

    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
    (role_err, response) = put_role_policy(iam_client, general_role_name, None, role_policy)
    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)

    resp = sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'], RoleSessionName=role_session_name, WebIdentityToken=token)
    eq(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    # The temporary credentials must allow bucket create + delete.
    s3_client = boto3.client('s3',
        aws_access_key_id=resp['Credentials']['AccessKeyId'],
        aws_secret_access_key=resp['Credentials']['SecretAccessKey'],
        aws_session_token=resp['Credentials']['SessionToken'],
        endpoint_url=default_endpoint,
        region_name='',
        )
    bucket_name = get_new_bucket_name()
    create_resp = s3_client.create_bucket(Bucket=bucket_name)
    eq(create_resp['ResponseMetadata']['HTTPStatusCode'], 200)
    delete_resp = s3_client.delete_bucket(Bucket=bucket_name)
    eq(delete_resp['ResponseMetadata']['HTTPStatusCode'], 204)

    # Clean up the identity provider registration.
    iam_client.delete_open_id_connect_provider(
        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
    )
# NOTE(review): the block below is a disabled test, kept "commented out" as a
# module-level triple-quoted string literal. It is never executed as code.
'''
@attr(resource='assume role with web identity')
@attr(method='get')
@attr(operation='check')
@attr(assertion='assume_role_with_web_token creds expire')
@attr('webidentity_test')
def test_assume_role_with_web_identity_invalid_webtoken():
resp_error=None
iam_client=get_iam_client()
sts_client=get_sts_client()
default_endpoint=get_config_endpoint()
role_session_name=get_parameter_name()
thumbprint=get_thumbprint()
aud=get_aud()
token=get_token()
realm=get_realm_name()
oidc_response = iam_client.create_open_id_connect_provider(
Url='http://localhost:8080/auth/realms/{}'.format(realm),
ThumbprintList=[
thumbprint,
],
)
policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
(role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
(role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
eq(response['ResponseMetadata']['HTTPStatusCode'],200)
resp=""
try:
resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken='abcdef')
except InvalidIdentityTokenException as e:
log.debug('{}'.format(resp))
log.debug('{}'.format(e.response.get("Error", {}).get("Code")))
log.debug('{}'.format(e))
resp_error = e.response.get("Error", {}).get("Code")
eq(resp_error,'AccessDenied')
oidc_remove=iam_client.delete_open_id_connect_provider(
OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
)
'''
| 41.94958
| 340
| 0.71067
| 1,852
| 14,976
| 5.432505
| 0.102052
| 0.033098
| 0.05248
| 0.017891
| 0.844946
| 0.830633
| 0.815426
| 0.811848
| 0.801809
| 0.801809
| 0
| 0.018131
| 0.116119
| 14,976
| 356
| 341
| 42.067416
| 0.741935
| 0
| 0
| 0.622711
| 0
| 0
| 0.155625
| 0.001862
| 0
| 0
| 0
| 0
| 0.021978
| 1
| 0.032967
| false
| 0
| 0.117216
| 0
| 0.161172
| 0.014652
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb61153d6890ae1969964e8e7074586cfa3bb578
| 1,901
|
py
|
Python
|
test/test_mice.py
|
drkarthi/fancyimpute
|
f89df71bc7253057aea9ac27f397a4af26579836
|
[
"Apache-2.0"
] | null | null | null |
test/test_mice.py
|
drkarthi/fancyimpute
|
f89df71bc7253057aea9ac27f397a4af26579836
|
[
"Apache-2.0"
] | null | null | null |
test/test_mice.py
|
drkarthi/fancyimpute
|
f89df71bc7253057aea9ac27f397a4af26579836
|
[
"Apache-2.0"
] | null | null | null |
from fancyimpute import MICE
from low_rank_data import XY, XY_incomplete, missing_mask
from common import reconstruction_error
def test_mice_column_with_low_rank_random_matrix():
    """Column-wise MICE should reconstruct the low-rank matrix closely."""
    imputer = MICE(n_imputations=100, impute_type='col')
    completed = imputer.complete(XY_incomplete)
    _, missing_mae = reconstruction_error(
        XY, completed, missing_mask, name="MICE (impute_type=col)")
    assert missing_mae < 0.1, "Error too high with column method!"
def test_mice_row_with_low_rank_random_matrix():
    """PMM-based MICE should reconstruct the low-rank matrix closely."""
    imputer = MICE(n_imputations=100, impute_type='pmm')
    completed = imputer.complete(XY_incomplete)
    _, missing_mae = reconstruction_error(
        XY, completed, missing_mask, name="MICE (impute_type=row)")
    assert missing_mae < 0.1, "Error too high with PMM method!"
def test_mice_column_with_low_rank_random_matrix_approximate():
    """Column-wise MICE limited to 5 nearest columns should still be accurate."""
    imputer = MICE(n_imputations=100, impute_type='col', n_nearest_columns=5)
    completed = imputer.complete(XY_incomplete)
    _, missing_mae = reconstruction_error(
        XY, completed, missing_mask, name="MICE (impute_type=col)")
    assert missing_mae < 0.1, "Error too high with approximate column method!"
def test_mice_row_with_low_rank_random_matrix_approximate():
    """PMM-based MICE limited to 5 nearest columns should still be accurate."""
    imputer = MICE(n_imputations=100, impute_type='pmm', n_nearest_columns=5)
    completed = imputer.complete(XY_incomplete)
    _, missing_mae = reconstruction_error(
        XY, completed, missing_mask, name="MICE (impute_type=row)")
    assert missing_mae < 0.1, "Error too high with approximate PMM method!"
if __name__ == "__main__":
    # Run every test in declaration order when executed as a script.
    for test in (
        test_mice_column_with_low_rank_random_matrix,
        test_mice_row_with_low_rank_random_matrix,
        test_mice_column_with_low_rank_random_matrix_approximate,
        test_mice_row_with_low_rank_random_matrix_approximate,
    ):
        test()
| 33.946429
| 78
| 0.730668
| 260
| 1,901
| 4.876923
| 0.169231
| 0.049685
| 0.069401
| 0.107256
| 0.894322
| 0.894322
| 0.894322
| 0.894322
| 0.828864
| 0.728707
| 0
| 0.014249
| 0.187796
| 1,901
| 55
| 79
| 34.563636
| 0.806995
| 0
| 0
| 0.545455
| 0
| 0
| 0.137822
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.068182
| 0
| 0.159091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb68719b6dcde4eb60da15aa4735aa5c76592150
| 243
|
py
|
Python
|
winter_ddd/__init__.py
|
zhukovqs/winter
|
20c361f29f89ffa0dc27501e12df8ef6e22a8e4c
|
[
"MIT"
] | 9
|
2019-01-24T11:50:19.000Z
|
2019-07-05T07:58:46.000Z
|
winter_ddd/__init__.py
|
zhukovqs/winter
|
20c361f29f89ffa0dc27501e12df8ef6e22a8e4c
|
[
"MIT"
] | 100
|
2019-01-29T08:11:38.000Z
|
2020-04-03T12:00:42.000Z
|
winter_ddd/__init__.py
|
zhukovqs/winter
|
20c361f29f89ffa0dc27501e12df8ef6e22a8e4c
|
[
"MIT"
] | 8
|
2020-07-16T13:56:50.000Z
|
2021-12-27T03:33:23.000Z
|
from .aggregate_root import AggregateRoot
from .domain_event import DomainEvent
from .domain_event_dispatcher import global_domain_event_dispatcher
from .domain_event_handler import domain_event_handler
from .domain_events import DomainEvents
| 40.5
| 67
| 0.897119
| 32
| 243
| 6.4375
| 0.40625
| 0.26699
| 0.218447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082305
| 243
| 5
| 68
| 48.6
| 0.923767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb9f75f2e8a4372fcd7438f0866036a2eb9346c6
| 3,065
|
py
|
Python
|
source-code.py
|
eberkeaydin/dijkstra-graph
|
4208a4c311d7668fcd1daa4ef24a6acc2a951987
|
[
"MIT"
] | null | null | null |
source-code.py
|
eberkeaydin/dijkstra-graph
|
4208a4c311d7668fcd1daa4ef24a6acc2a951987
|
[
"MIT"
] | null | null | null |
source-code.py
|
eberkeaydin/dijkstra-graph
|
4208a4c311d7668fcd1daa4ef24a6acc2a951987
|
[
"MIT"
] | null | null | null |
import networkx as netx
import matplotlib.pyplot as plt


def _draw_graph(graph, node_pos, arc_weight):
    """Render the graph with its node positions and edge-weight labels."""
    netx.draw_networkx(graph, node_pos, node_size=450)
    netx.draw_networkx_edges(graph, node_pos, edge_color='black')
    netx.draw_networkx_edge_labels(graph, node_pos, label_pos=0.3, edge_labels=arc_weight)
    plt.axis('off')
    plt.show()


def _path_length(path, arc_weight, source):
    """Sum the edge weights along ``path``, skipping the source node itself."""
    total = 0
    prev = source
    for node in path:
        if node == source:
            continue
        total += arc_weight[(prev, node)]
        prev = node
    return total


def _report_shortest_path(graph, arc_weight, source, target):
    """Print the dijkstra path from source to target and its total weight."""
    path = netx.dijkstra_path(graph, source=source, target=target)
    print("\nShortest path({},{}):".format(source, target), end="")
    print(path)
    print("Shortest path length({},{}):".format(source, target),
          _path_length(path, arc_weight, source))


# Build the directed example graph: 5 nodes with fixed layout positions.
graph = netx.DiGraph()
positions = {0: (0, 4), 1: (3, 2.5), 2: (2, 0), 3: (-2, 0), 4: (-3, 2.5)}
for node, pos in positions.items():
    graph.add_node(node)
    graph.nodes[node]['pos'] = pos

# Weighted directed edges.
for u, v, w in [(0, 1, 5), (0, 2, 3), (0, 4, 2), (1, 2, 2), (1, 3, 6),
                (2, 1, 1), (2, 3, 2), (4, 1, 6), (4, 2, 10), (4, 3, 4)]:
    graph.add_edge(u, v, weight=w)

node_pos = netx.get_node_attributes(graph, 'pos')
arc_weight = netx.get_edge_attributes(graph, 'weight')
_draw_graph(graph, node_pos, arc_weight)

print("There is no way to nodes from 4. node.")
for target in (1, 2, 3):
    _report_shortest_path(graph, arc_weight, 4, target)

print("##################################################")

# Remove node 3 and repeat the analysis on the reduced graph
# (node 3 is no longer reachable, so only targets 1 and 2 remain).
graph.remove_node(3)
node_pos = netx.get_node_attributes(graph, 'pos')
arc_weight = netx.get_edge_attributes(graph, 'weight')
_draw_graph(graph, node_pos, arc_weight)

print("There is no way to 0 from 4. node")
for target in (1, 2):
    _report_shortest_path(graph, arc_weight, 4, target)

input()
| 24.717742
| 85
| 0.709951
| 509
| 3,065
| 4.115914
| 0.133595
| 0.057279
| 0.057279
| 0.050119
| 0.832458
| 0.759427
| 0.728878
| 0.712649
| 0.712649
| 0.712649
| 0
| 0.047584
| 0.115498
| 3,065
| 123
| 86
| 24.918699
| 0.725194
| 0
| 0
| 0.669903
| 0
| 0
| 0.132613
| 0.016372
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019417
| 0
| 0.019417
| 0.174757
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8380609679d9bfa4892139ddeca0602c8f1071a2
| 112
|
py
|
Python
|
docs/source/working/include/snippets/processes/functions/signature_plain_python_call_default.py
|
pranavmodx/aiida-core
|
0edbbf82dfb97ab130914d1674a6f2217eba5971
|
[
"BSD-2-Clause",
"MIT"
] | 1
|
2019-07-31T04:08:13.000Z
|
2019-07-31T04:08:13.000Z
|
docs/source/working/include/snippets/processes/functions/signature_plain_python_call_default.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
docs/source/working/include/snippets/processes/functions/signature_plain_python_call_default.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
def add_multiply(x, y, z=1):
    """Return the sum of x and y, scaled by z (z defaults to 1)."""
    total = x + y
    return total * z


add_multiply(1, 2)  # x=1, y=2, z=1
| 16
| 35
| 0.5625
| 25
| 112
| 2.44
| 0.52
| 0.360656
| 0.098361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0.223214
| 112
| 6
| 36
| 18.666667
| 0.632184
| 0.303571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
838c2bace5e78b6fcf465bb33012058e19d06ba5
| 235
|
py
|
Python
|
effdet/__init__.py
|
Jay9z/gwhd_efficentdet
|
2ee45a51172faa9fc448d4ee4cd3931eaaa87c53
|
[
"Apache-2.0"
] | null | null | null |
effdet/__init__.py
|
Jay9z/gwhd_efficentdet
|
2ee45a51172faa9fc448d4ee4cd3931eaaa87c53
|
[
"Apache-2.0"
] | null | null | null |
effdet/__init__.py
|
Jay9z/gwhd_efficentdet
|
2ee45a51172faa9fc448d4ee4cd3931eaaa87c53
|
[
"Apache-2.0"
] | null | null | null |
from .efficientdet import EfficientDet
from .bench import DetBenchEval, DetBenchTrain
#from .config.config import get_efficientdet_config
from .config import get_efficientdet_config
from .helpers import load_checkpoint, load_pretrained
| 47
| 53
| 0.87234
| 29
| 235
| 6.862069
| 0.413793
| 0.100503
| 0.150754
| 0.271357
| 0.371859
| 0.371859
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089362
| 235
| 5
| 53
| 47
| 0.929907
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83908d395811dd55647f059eb45b14cab1dbc558
| 84
|
py
|
Python
|
payments/tests/__init__.py
|
Elijah-glitch/Hey
|
00c09a0c8bfa9868d8048f697b36849569f9e127
|
[
"MIT"
] | 25
|
2016-07-14T06:16:17.000Z
|
2021-12-21T06:52:42.000Z
|
payments/tests/__init__.py
|
Elijah-glitch/Hey
|
00c09a0c8bfa9868d8048f697b36849569f9e127
|
[
"MIT"
] | 4
|
2017-11-29T20:20:30.000Z
|
2017-12-01T00:04:29.000Z
|
payments/tests/__init__.py
|
Elijah-glitch/Hey
|
00c09a0c8bfa9868d8048f697b36849569f9e127
|
[
"MIT"
] | 5
|
2016-07-19T18:26:24.000Z
|
2020-05-31T18:40:15.000Z
|
# flake8: noqa
from .bitcoin import *
from .paypal import *
from .stripe import *
| 12
| 22
| 0.702381
| 11
| 84
| 5.363636
| 0.636364
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.202381
| 84
| 6
| 23
| 14
| 0.865672
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83e079d520f78a8ff96bb981055a08790996eee0
| 36
|
py
|
Python
|
naf2conll/naf2conll/__main__.py
|
Filter-Bubble/FormatConversions
|
91c313d66edba077462740c1403a705aa1f96df4
|
[
"Apache-2.0"
] | 3
|
2019-11-21T13:43:37.000Z
|
2021-05-12T20:46:49.000Z
|
naf2conll/naf2conll/__main__.py
|
Filter-Bubble/FormatConversions
|
91c313d66edba077462740c1403a705aa1f96df4
|
[
"Apache-2.0"
] | 3
|
2018-05-22T13:07:43.000Z
|
2020-03-14T17:31:15.000Z
|
naf2conll/naf2conll/__main__.py
|
Filter-Bubble/FormatConversions
|
91c313d66edba077462740c1403a705aa1f96df4
|
[
"Apache-2.0"
] | 2
|
2020-03-05T15:55:47.000Z
|
2021-05-12T20:46:50.000Z
|
# Module entry point: import the CLI class and invoke it immediately on import.
from .main import Main
Main.main()
| 9
| 22
| 0.722222
| 6
| 36
| 4.333333
| 0.5
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 36
| 3
| 23
| 12
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
83f54030d300d5175c80417b42d2cd1a74e5099f
| 177
|
py
|
Python
|
Python/packages/databricks-test/tests/library_test.py
|
anandmrya/DataOps
|
1a671c707e27b30030687a2a88e5fa94374ce780
|
[
"MIT"
] | 42
|
2019-12-04T04:10:53.000Z
|
2022-03-31T13:04:17.000Z
|
Python/packages/databricks-test/tests/library_test.py
|
anandmrya/DataOps
|
1a671c707e27b30030687a2a88e5fa94374ce780
|
[
"MIT"
] | 2
|
2020-02-25T11:24:34.000Z
|
2020-03-05T06:12:59.000Z
|
Python/packages/databricks-test/tests/library_test.py
|
anandmrya/DataOps
|
1a671c707e27b30030687a2a88e5fa94374ce780
|
[
"MIT"
] | 18
|
2020-01-25T06:25:08.000Z
|
2021-11-16T08:40:09.000Z
|
import databricks_test
def test_library():
    """The library notebook should run without error inside a test session."""
    with databricks_test.session() as session:
        # Execute the notebook from the current directory.
        session.run_notebook(".", "library_notebook")
| 22.125
| 57
| 0.706215
| 19
| 177
| 6.315789
| 0.578947
| 0.233333
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19774
| 177
| 7
| 58
| 25.285714
| 0.84507
| 0.067797
| 0
| 0
| 0
| 0
| 0.104294
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7c0b8dc42c03443b567c05ec27bebae83f29cd8
| 171
|
py
|
Python
|
detection.py
|
devSeungGwan/CSI-Camera
|
9fa5b01511153959a30ef7d4ab375dba3897d21f
|
[
"BSD-3-Clause"
] | null | null | null |
detection.py
|
devSeungGwan/CSI-Camera
|
9fa5b01511153959a30ef7d4ab375dba3897d21f
|
[
"BSD-3-Clause"
] | null | null | null |
detection.py
|
devSeungGwan/CSI-Camera
|
9fa5b01511153959a30ef7d4ab375dba3897d21f
|
[
"BSD-3-Clause"
] | null | null | null |
import capture
from face_detection import face_detection
if __name__ == "__main__":
    # Wire the camera capture into the face detector and run detection once.
    detector = face_detection()
    camera = capture.cam()
    detector.detection(camera.capture())
| 21.375
| 41
| 0.719298
| 21
| 171
| 5.333333
| 0.428571
| 0.464286
| 0.285714
| 0.410714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175439
| 171
| 8
| 42
| 21.375
| 0.794326
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
792375712560b837a3f3607bd6e18617500b6a9c
| 15,528
|
py
|
Python
|
src/Fig_6_supplement_1_Plotting.py
|
fmi-basel/gzenke-nonlinear-transient-amplification
|
f3b0c8c89b42c34f1aad740c7026865cf3164f1d
|
[
"MIT"
] | null | null | null |
src/Fig_6_supplement_1_Plotting.py
|
fmi-basel/gzenke-nonlinear-transient-amplification
|
f3b0c8c89b42c34f1aad740c7026865cf3164f1d
|
[
"MIT"
] | 3
|
2021-12-16T10:15:10.000Z
|
2021-12-16T12:54:24.000Z
|
src/Fig_6_supplement_1_Plotting.py
|
fmi-basel/gzenke-nonlinear-transient-amplification
|
f3b0c8c89b42c34f1aad740c7026865cf3164f1d
|
[
"MIT"
] | 1
|
2021-12-16T10:02:43.000Z
|
2021-12-16T10:02:43.000Z
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import patches
import matplotlib.patches as mpatches
import scipy.io as sio

# plotting configuration
ratio = 1.5
figure_len, figure_width = 15 * ratio, 12 * ratio
font_size_1, font_size_2 = 36 * ratio, 36 * ratio
legend_size = 18 * ratio
line_width, tick_len = 3 * ratio, 10 * ratio
marker_size = 15 * ratio
marker_edge_width = 3 * ratio
plot_line_width = 5 * ratio
hfont = {'fontname': 'Arial'}

# Feedforward-input amplitudes: all are loaded from disk, but only the
# PLOTTED_AMPS subset is shown (at x positions 0..6).
ALL_AMPS = (80, 85, 90, 95, 100, 105, 110, 115, 120, 125, 130, 135, 140)
PLOTTED_AMPS = (80, 90, 100, 110, 120, 130, 140)

_DATA_TMPL = ('data_sum/spiking_neural_network/'
              'Revision_Fig_Point_1_2_Spiking_neural_networks_EE_STP_total_mean_'
              '{kind}_firing_amp_{amp}_{loop}.mat')


def _load_rate(kind, amp, loop_idx):
    """Load a mean firing rate array; kind is 'bl' (baseline) or 'ss' (steady state)."""
    path = _DATA_TMPL.format(kind=kind, amp=amp, loop=loop_idx)
    return sio.loadmat(path)['mean_{}_firing_4_2'.format(kind)][0]


n_loop = 20
# ratios[amp] collects, per simulation loop, the steady-state / baseline
# firing-rate ratio at that feedforward amplitude.
ratios = {amp: [] for amp in ALL_AMPS}
for loop_idx in range(n_loop):
    for amp in ALL_AMPS:
        baseline = _load_rate('bl', amp, loop_idx)
        steady_state = _load_rate('ss', amp, loop_idx)
        ratios[amp].append(steady_state / baseline)

# plotting
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
    ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)

ax = sns.boxplot(data=[ratios[amp] for amp in PLOTTED_AMPS], width=0.45,
                 linewidth=line_width, color='white')
print(len(ax.lines))

# Restyle each box black-on-white; every box owns 6 consecutive lines
# (whiskers, caps, median) in ax.lines.
for m, box in enumerate(ax.artists):
    print(m)
    box.set_edgecolor('black')
    box.set_facecolor('white')
    for j in range(6 * m, 6 * (m + 1)):
        ax.lines[j].set_color('black')

# Overlay the individual loop values as open circles, alternating a
# +/-0.1 horizontal offset around each box position.
for pos, amp in enumerate(PLOTTED_AMPS):
    for i, value in enumerate(ratios[amp]):
        offset = -0.1 if i % 2 == 0 else 0.1
        plt.plot(pos + offset, value, linestyle='none', marker='o', fillstyle='full',
                 markeredgewidth=marker_edge_width, markersize=marker_size,
                 markeredgecolor='black', markerfacecolor='none')

plt.xticks([0, 2, 4, 6], ['8/30', '10/30', '12/30', '14/30'], fontsize=font_size_1, **hfont)
plt.yticks([0, 1, 2, 3, 4, 5], fontsize=font_size_1, **hfont)
plt.xlabel('Feedforward input', fontsize=font_size_1, **hfont)
plt.ylabel('Fixed point to baseline ratio', fontsize=font_size_1, **hfont)
plt.xlim([-0.5, 6.5])
plt.ylim([0, 5])
plt.hlines(y=1, xmin=-0.5, xmax=6.5, colors='k', linestyles=[(0, (6, 6, 6, 6))], linewidth=line_width)
plt.savefig('paper_figures/png/Revision_Fig_Point_1_2_Unstimulated_cotuned_neuron_SNN.png')
plt.savefig('paper_figures/pdf/Revision_Fig_Point_1_2_Unstimulated_cotuned_neuron_SNN.pdf')
| 52.282828
| 192
| 0.677035
| 2,239
| 15,528
| 4.292988
| 0.077713
| 0.070329
| 0.046608
| 0.049521
| 0.840096
| 0.83531
| 0.817728
| 0.81211
| 0.804827
| 0.795256
| 0
| 0.062097
| 0.191074
| 15,528
| 297
| 193
| 52.282828
| 0.703129
| 0.231904
| 0
| 0.341709
| 0
| 0
| 0.345051
| 0.264205
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030151
| 0
| 0.030151
| 0.01005
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f733624b52386015544bec768023286f7efa8a41
| 131
|
py
|
Python
|
app/repositories/__init__.py
|
VadymHutei/ukubuka-back
|
acd56c545b50fb65ed764c19bdd03a42be969ce4
|
[
"MIT"
] | null | null | null |
app/repositories/__init__.py
|
VadymHutei/ukubuka-back
|
acd56c545b50fb65ed764c19bdd03a42be969ce4
|
[
"MIT"
] | null | null | null |
app/repositories/__init__.py
|
VadymHutei/ukubuka-back
|
acd56c545b50fb65ed764c19bdd03a42be969ce4
|
[
"MIT"
] | null | null | null |
from repositories.category import CategoryRepo
from repositories.product import ProductRepo
from repositories.menu import MenuRepo
| 32.75
| 46
| 0.885496
| 15
| 131
| 7.733333
| 0.6
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091603
| 131
| 3
| 47
| 43.666667
| 0.97479
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f75aef27b08c20cde9e42b88d25434d9c7e5324d
| 17,363
|
py
|
Python
|
tests/test_pascal_style_byte_stream.py
|
scottcwang/openssh_key_parser
|
f8ba2b841620abd9166e99176e033111daaf0570
|
[
"MIT"
] | 15
|
2020-08-15T02:34:34.000Z
|
2022-03-27T05:41:24.000Z
|
tests/test_pascal_style_byte_stream.py
|
scottcwang/openssh_key_parser
|
f8ba2b841620abd9166e99176e033111daaf0570
|
[
"MIT"
] | 3
|
2022-01-26T23:38:10.000Z
|
2022-01-30T15:41:06.000Z
|
tests/test_pascal_style_byte_stream.py
|
scottcwang/openssh_key_parser
|
f8ba2b841620abd9166e99176e033111daaf0570
|
[
"MIT"
] | 1
|
2022-01-27T10:47:26.000Z
|
2022-01-27T10:47:26.000Z
|
import pytest
from openssh_key.pascal_style_byte_stream import (
PascalStyleByteStream,
PascalStyleFormatInstruction,
PascalStyleFormatInstructionStringLengthSize
)
def test_read_fixed_bytes():
test_bytes = b'\x01\x02\x03\x04'
byte_stream = PascalStyleByteStream(test_bytes)
result = byte_stream.read_fixed_bytes(4)
assert result == test_bytes
def test_read_fixed_bytes_underfull():
test_bytes = b'\x01\x02\x03\x04'
byte_stream = PascalStyleByteStream(test_bytes)
with pytest.raises(EOFError):
byte_stream.read_fixed_bytes(5)
def test_read_fixed_bytes_overfull():
test_bytes = b'\x01\x02\x03\x04'
byte_stream = PascalStyleByteStream(test_bytes)
byte_stream.read_fixed_bytes(3)
assert byte_stream.read() == b'\x04'
def test_read_fixed_bytes_zero():
test_bytes = b'\x01\x02\x03\x04'
byte_stream = PascalStyleByteStream(test_bytes)
byte_stream.read_fixed_bytes(0)
assert byte_stream.read() == test_bytes
def test_read_pascal_bytes():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x02'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_pascal_bytes(4)
assert result == b'\x02'
def test_read_negative_pascal_bytes():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x02'
byte_stream = PascalStyleByteStream(pascal_bytes)
with pytest.raises(
ValueError,
match='string_length_size must be positive'
):
byte_stream.read_pascal_bytes(-1)
def test_read_pascal_bytes_underfull_length():
pascal_bytes = b'\x00\x00\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
with pytest.raises(EOFError):
byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING)
def test_read_pascal_bytes_underfull_string():
pascal_bytes = b'\x00\x00\x00\x04' + b'\x00\x00\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
with pytest.raises(EOFError):
byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING)
def test_read_pascal_bytes_overfull():
pascal_bytes = b'\x00\x00\x00\x04' + b'abcd' + b'\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING)
assert byte_stream.read() == b'\x00'
def test_read_from_struct_single_format_instruction():
test_bytes = b'\x00\x00\x00\x01'
byte_stream = PascalStyleByteStream(test_bytes)
result = byte_stream.read_from_format_instruction('>I')
assert result == 1
def test_read_from_struct_multiple_format_instruction():
test_bytes = b'\x00\x00\x00\x01\x00\x00\x00\x02'
byte_stream = PascalStyleByteStream(test_bytes)
result = byte_stream.read_from_format_instruction('>II')
assert result == (1, 2)
def test_read_from_string_format_instruction():
pascal_bytes = b'\x00\x00\x00\x04' + b'abcd'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING)
assert result == 'abcd'
def test_read_from_bytes_format_instruction():
pascal_bytes = b'\x00\x00\x00\x04' + b'\x01\x02\x03\x04'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.BYTES)
assert result == b'\x01\x02\x03\x04'
def test_read_from_pos_mpint_format_instruction():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x7f'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.MPINT)
assert result == 0x7f
def test_read_from_neg_mpint_format_instruction():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x80'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.MPINT)
assert result == -0x80
def test_read_from_zero_mpint_format_instruction():
pascal_bytes = b'\x00\x00\x00\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.MPINT)
assert result == 0
def test_read_from_string_format_instruction_length():
pascal_bytes = b'\x00\x00\x00\x00\x00\x00\x00\x04' + b'abcd'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING,
string_length_size=8
)
assert result == 'abcd'
def test_read_from_pascal_underfull_length():
pascal_bytes = b'\x00\x00\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
with pytest.raises(EOFError):
byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING)
def test_read_from_pascal_underfull_string():
pascal_bytes = b'\x00\x00\x00\x04' + b'\x00\x00\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
with pytest.raises(EOFError):
byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING)
def test_read_from_pascal_overfull():
pascal_bytes = b'\x00\x00\x00\x04' + b'abcd' + b'\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
byte_stream.read_from_format_instruction(
PascalStyleFormatInstruction.STRING)
assert byte_stream.read() == b'\x00'
def test_read_from_format_instructions_dict():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x00' \
+ b'\x00\x00\x00\x02'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instructions_dict({
'first': PascalStyleFormatInstruction.BYTES,
'second': '>I'
})
assert result == {
'first': b'\x00',
'second': 2
}
def test_read_from_empty_format_instructions_dict():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x00' \
+ b'\x00\x00\x00\x02'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instructions_dict({})
assert result == {}
def test_read_from_format_instructions_dict_underfull():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x00' \
+ b'\x00\x00\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
with pytest.raises(EOFError):
byte_stream.read_from_format_instructions_dict({
'first': PascalStyleFormatInstruction.BYTES,
'second': '>I',
})
def test_read_from_format_instructions_dict_overfull():
pascal_bytes = b'\x00\x00\x00\x01' + b'\x00' \
+ b'\x00\x00\x00\x02' \
+ b'\x03'
byte_stream = PascalStyleByteStream(pascal_bytes)
byte_stream.read_from_format_instructions_dict({
'first': PascalStyleFormatInstruction.BYTES,
'second': '>I',
})
assert byte_stream.read() == b'\x03'
def test_read_from_format_instructions_dict_length():
pascal_bytes = b'\x01' + b'\x00'
byte_stream = PascalStyleByteStream(pascal_bytes)
result = byte_stream.read_from_format_instructions_dict({
'first': PascalStyleFormatInstructionStringLengthSize(
PascalStyleFormatInstruction.BYTES,
1
)
})
assert result == {
'first': b'\x00'
}
def test_write_from_struct_format_instruction():
test_int = 1
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction('>I', test_int)
assert byte_stream.getvalue() == b'\x00\x00\x00\x01'
def test_write_from_bytes_format_instruction():
test_bytes = b'\x00'
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.BYTES,
test_bytes
)
assert byte_stream.getvalue() == b'\x00\x00\x00\x01' + b'\x00'
def test_write_from_string_format_instruction():
test_string = 'abcd'
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.STRING,
test_string
)
assert byte_stream.getvalue() == b'\x00\x00\x00\x04' + b'abcd'
def test_write_from_string_format_instruction_string_length_size():
test_string = 'abcd'
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.STRING,
test_string,
8
)
assert byte_stream.getvalue() == \
b'\x00\x00\x00\x00\x00\x00\x00\x04' + b'abcd'
def test_write_from_pos_no_prefix_mpint_format_instruction():
test_int = 0x1000
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.MPINT,
test_int
)
assert byte_stream.getvalue() == b'\x00\x00\x00\x02' + b'\x10\x00'
def test_write_from_pos_with_prefix_mpint_format_instruction():
test_int = 0x8000
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.MPINT,
test_int
)
assert byte_stream.getvalue() == b'\x00\x00\x00\x03' + b'\x00\x80\x00'
def test_write_from_neg_mpint_format_instruction():
test_int = -0x8000
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.MPINT,
test_int
)
assert byte_stream.getvalue() == b'\x00\x00\x00\x02' + b'\x80\x00'
def test_write_from_zero_mpint_format_instruction():
test_int = 0
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.MPINT,
test_int
)
assert byte_stream.getvalue() == b'\x00\x00\x00\x00'
def test_write_from_bytes_format_instruction_bad_class_str():
test = 'random'
byte_stream = PascalStyleByteStream()
with pytest.raises(
ValueError,
match='value must be a bytes instance for bytes format instruction'
):
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.BYTES,
test
)
def test_write_from_bytes_format_instruction_bad_class_int():
test = 1
byte_stream = PascalStyleByteStream()
with pytest.raises(
ValueError,
match='value must be a bytes instance for bytes format instruction'
):
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.BYTES,
test
)
def test_write_from_str_format_instruction_bad_class_bytes():
test = b'random'
byte_stream = PascalStyleByteStream()
with pytest.raises(
ValueError,
match='value must be a str instance for string format instruction'
):
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.STRING,
test
)
def test_write_from_str_format_instruction_bad_class_int():
test = 1
byte_stream = PascalStyleByteStream()
with pytest.raises(
ValueError,
match='value must be a str instance for string format instruction'
):
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.STRING,
test
)
def test_write_from_mpint_format_instruction_bad_class_bytes():
test = b'random'
byte_stream = PascalStyleByteStream()
with pytest.raises(
ValueError,
match='value must be an int instance for mpint format instruction'
):
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.MPINT,
test
)
def test_write_from_mpint_format_instruction_bad_class_str():
test = 'random'
byte_stream = PascalStyleByteStream()
with pytest.raises(
ValueError,
match='value must be an int instance for mpint format instruction'
):
byte_stream.write_from_format_instruction(
PascalStyleFormatInstruction.MPINT,
test
)
def test_write_from_format_instructions_dict():
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instructions_dict({
'first': PascalStyleFormatInstruction.BYTES,
'second': '>I',
}, {
'first': b'\x00',
'second': 2,
})
assert byte_stream.getvalue() == b'\x00\x00\x00\x01' + b'\x00' \
+ b'\x00\x00\x00\x02'
def test_write_from_empty_format_instructions_dict():
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instructions_dict({}, {
'first': b'\x00',
'second': 2,
})
assert byte_stream.getvalue() == b''
def test_write_from_format_instructions_dict_missing_key():
byte_stream = PascalStyleByteStream()
with pytest.raises(KeyError):
byte_stream.write_from_format_instructions_dict({
'missing': '>I'
}, {
'first': b'\x00',
'second': 2,
})
def test_write_from_format_instructions_dict_length():
byte_stream = PascalStyleByteStream()
byte_stream.write_from_format_instructions_dict({
'first': PascalStyleFormatInstructionStringLengthSize(
PascalStyleFormatInstruction.BYTES,
2
)
}, {
'first': b'\x00'
})
assert byte_stream.getvalue() == b'\x00\x01' + b'\x00'
def test_check_dict_str():
with pytest.warns(None) as warnings_list:
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 'string'
},
{
'a': PascalStyleFormatInstruction.STRING
}
)
assert not warnings_list
def test_check_dict_bytes():
with pytest.warns(None) as warnings_list:
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': b'\x00'
},
{
'a': PascalStyleFormatInstruction.BYTES
}
)
assert not warnings_list
def test_check_dict_mpint():
with pytest.warns(None) as warnings_list:
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 1
},
{
'a': PascalStyleFormatInstruction.MPINT
}
)
assert not warnings_list
def test_check_dict_incorrect_type():
with pytest.warns(UserWarning, match='a should be of class int'):
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 'string'
},
{
'a': PascalStyleFormatInstruction.MPINT
}
)
def test_check_dict_format_string():
with pytest.warns(None) as warnings_list:
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 1
},
{
'a': '>i'
}
)
assert not warnings_list
def test_check_dict_format_string_too_large():
with pytest.warns(UserWarning, match='a should be formatted as >i'):
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 2 ** 33
},
{
'a': '>i'
}
)
def test_check_dict_two_attributes():
with pytest.warns(None) as warnings_list:
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 1,
'b': 2
},
{
'a': PascalStyleFormatInstruction.MPINT,
'b': PascalStyleFormatInstruction.MPINT
}
)
assert not warnings_list
def test_check_dict_missing_attribute():
with pytest.warns(UserWarning, match='b missing'):
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 1
},
{
'a': PascalStyleFormatInstruction.MPINT,
'b': PascalStyleFormatInstruction.MPINT
}
)
def test_check_dict_extra_attribute():
with pytest.warns(None) as warnings_list:
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 1,
'b': 2,
'c': 3
},
{
'a': PascalStyleFormatInstruction.MPINT,
'b': PascalStyleFormatInstruction.MPINT
}
)
assert not warnings_list
def test_check_dict_length():
with pytest.warns(None) as warnings_list:
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 'string'
},
{
'a': PascalStyleFormatInstructionStringLengthSize(
PascalStyleFormatInstruction.STRING,
1
)
}
)
assert not warnings_list
def test_check_dict_length_incorrect_type():
with pytest.warns(UserWarning, match='a should be of class int'):
PascalStyleByteStream.check_dict_matches_format_instructions_dict(
{
'a': 'string'
},
{
'a': PascalStyleFormatInstructionStringLengthSize(
PascalStyleFormatInstruction.MPINT,
1
)
}
)
| 30.039792
| 75
| 0.666878
| 1,882
| 17,363
| 5.796493
| 0.057386
| 0.094417
| 0.038775
| 0.033
| 0.932899
| 0.893482
| 0.861766
| 0.814098
| 0.772298
| 0.738564
| 0
| 0.036507
| 0.242758
| 17,363
| 577
| 76
| 30.091854
| 0.7932
| 0
| 0
| 0.547009
| 0
| 0
| 0.090595
| 0.005529
| 0
| 0
| 0.001497
| 0
| 0.076923
| 1
| 0.115385
| false
| 0
| 0.004274
| 0
| 0.119658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f760aefd9a8a0f0b01d937621f42741411b78ab9
| 77
|
py
|
Python
|
maxwell_slice/__init__.py
|
flatironinstitute/maxwell-slice
|
5941ca10098070f7df7f44cf662174c598a7521c
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:11:18.000Z
|
2021-09-23T01:11:18.000Z
|
maxwell_slice/__init__.py
|
flatironinstitute/maxwell-slice
|
5941ca10098070f7df7f44cf662174c598a7521c
|
[
"Apache-2.0"
] | 9
|
2021-01-04T18:30:43.000Z
|
2021-02-09T19:30:51.000Z
|
maxwell_slice/__init__.py
|
flatironinstitute/maxwell-slice
|
5941ca10098070f7df7f44cf662174c598a7521c
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:11:19.000Z
|
2021-09-23T01:11:19.000Z
|
from .test_function import test_function
from .sphere_scat import sphere_scat
| 38.5
| 40
| 0.883117
| 12
| 77
| 5.333333
| 0.5
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 77
| 2
| 41
| 38.5
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f77baf2322e74066e5dc85abaccf437de525c52f
| 108
|
py
|
Python
|
lasier/adapters/caches/__init__.py
|
rafa-acioly/lasier
|
b518f93207ff15ba32b286f466f3ca3cea231b4c
|
[
"MIT"
] | null | null | null |
lasier/adapters/caches/__init__.py
|
rafa-acioly/lasier
|
b518f93207ff15ba32b286f466f3ca3cea231b4c
|
[
"MIT"
] | null | null | null |
lasier/adapters/caches/__init__.py
|
rafa-acioly/lasier
|
b518f93207ff15ba32b286f466f3ca3cea231b4c
|
[
"MIT"
] | null | null | null |
from .aiocache import Adapter as AiocacheAdapter # noqa
from .redis import Adapter as RedisAdapter # noqa
| 36
| 56
| 0.796296
| 14
| 108
| 6.142857
| 0.642857
| 0.302326
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 108
| 2
| 57
| 54
| 0.955556
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e38cd6662e93ae5781017f3bd4903fd9e54605d6
| 24,003
|
py
|
Python
|
backend/tests/baserow/api/users/test_user_views.py
|
LiuJun666888/baserow
|
bc5b7f8ebe319f90ed1aabdb7f5dfd8916c3dad1
|
[
"MIT"
] | null | null | null |
backend/tests/baserow/api/users/test_user_views.py
|
LiuJun666888/baserow
|
bc5b7f8ebe319f90ed1aabdb7f5dfd8916c3dad1
|
[
"MIT"
] | null | null | null |
backend/tests/baserow/api/users/test_user_views.py
|
LiuJun666888/baserow
|
bc5b7f8ebe319f90ed1aabdb7f5dfd8916c3dad1
|
[
"MIT"
] | null | null | null |
import os
import pytest
from unittest.mock import patch
from freezegun import freeze_time
from rest_framework.status import (
HTTP_200_OK,
HTTP_201_CREATED,
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
)
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
from django.conf import settings
from baserow.api.user.registries import user_data_registry, UserDataType
from baserow.contrib.database.models import Database, Table
from baserow.core.handler import CoreHandler
from baserow.core.models import Group, GroupUser
from baserow.core.user.handler import UserHandler
User = get_user_model()
@pytest.mark.django_db
def test_create_user(client, data_fixture):
valid_password = "thisIsAValidPassword"
short_password = "short"
response = client.post(
reverse("api:user:index"),
{"name": "Test1", "email": "test@test.nl", "password": valid_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
user = User.objects.get(email="test@test.nl")
assert user.first_name == "Test1"
assert user.email == "test@test.nl"
assert user.password != ""
assert "password" not in response_json["user"]
assert response_json["user"]["username"] == "test@test.nl"
assert response_json["user"]["first_name"] == "Test1"
assert response_json["user"]["is_staff"] is True
assert response_json["user"]["id"] == user.id
# Test profile properties
response = client.post(
reverse("api:user:index"),
{
"name": "Test1Bis",
"email": "test1bis@test.nl",
"password": valid_password,
"language": "fr",
},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
user = User.objects.get(email="test1bis@test.nl")
assert user.profile.language == "fr"
assert response_json["user"]["language"] == "fr"
response_failed = client.post(
reverse("api:user:index"),
{"name": "Test1", "email": "test@test.nl", "password": valid_password},
format="json",
)
assert response_failed.status_code == 400
assert response_failed.json()["error"] == "ERROR_EMAIL_ALREADY_EXISTS"
response_failed = client.post(
reverse("api:user:index"),
{"name": "Test1", "email": " teSt@teST.nl ", "password": valid_password},
format="json",
)
assert response_failed.status_code == 400
assert response_failed.json()["error"] == "ERROR_EMAIL_ALREADY_EXISTS"
too_long_name = "x" * 151
response_failed = client.post(
reverse("api:user:index"),
{
"name": too_long_name,
"email": "new@example.com ",
"password": valid_password,
},
format="json",
)
assert response_failed.status_code == 400
assert response_failed.json()["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert response_failed.json()["detail"] == {
"name": [
{
"code": "max_length",
"error": "Ensure this field has no more than 150 characters.",
}
]
}
data_fixture.update_settings(allow_new_signups=False)
response_failed = client.post(
reverse("api:user:index"),
{"name": "Test1", "email": "test10@test.nl", "password": valid_password},
format="json",
)
assert response_failed.status_code == 400
assert response_failed.json()["error"] == "ERROR_DISABLED_SIGNUP"
data_fixture.update_settings(allow_new_signups=True)
response_failed_2 = client.post(
reverse("api:user:index"), {"email": "test"}, format="json"
)
assert response_failed_2.status_code == 400
long_password = "x" * 256
response = client.post(
reverse("api:user:index"),
{"name": "Test2", "email": "test2@test.nl", "password": long_password},
format="json",
)
assert response.status_code == HTTP_200_OK
user = User.objects.get(email="test2@test.nl")
assert user.check_password(long_password)
long_password = "x" * 257
response = client.post(
reverse("api:user:index"),
{"name": "Test2", "email": "test2@test.nl", "password": long_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["password"][0]["code"] == "password_validation_failed"
)
assert (
response_json["detail"]["password"][0]["error"]
== "This password is too long. It must not exceed 256 characters."
)
response = client.post(
reverse("api:user:index"),
{"name": "Test2", "email": "random@test.nl", "password": short_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["password"][0]["code"] == "password_validation_failed"
)
assert (
response_json["detail"]["password"][0]["error"]
== "This password is too short. It must contain at least 8 characters."
)
# Test profile attribute errors
response_failed = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test20@test.nl",
"password": valid_password,
"language": "invalid",
},
format="json",
)
response_json = response_failed.json()
assert response_failed.status_code == 400
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert response_json["detail"]["language"][0]["code"] == "invalid_language"
assert response_json["detail"]["language"][0]["error"] == (
"Only the following language keys are "
f"valid: {','.join([l[0] for l in settings.LANGUAGES])}"
)
@pytest.mark.django_db
def test_user_account(data_fixture, api_client):
user, token = data_fixture.create_user_and_token(
email="test@localhost.nl", language="en", first_name="Nikolas"
)
response = api_client.patch(
reverse("api:user:account"),
{
"first_name": "NewOriginalName",
"language": "fr",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["first_name"] == "NewOriginalName"
assert response_json["language"] == "fr"
user.refresh_from_db()
assert user.first_name == "NewOriginalName"
assert user.profile.language == "fr"
response = api_client.patch(
reverse("api:user:account"),
{
"language": "invalid",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == 400
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert response_json["detail"]["language"][0]["code"] == "invalid_language"
assert response_json["detail"]["language"][0]["error"] == (
"Only the following language keys are "
f"valid: {','.join([l[0] for l in settings.LANGUAGES])}"
)
@pytest.mark.django_db
def test_create_user_with_invitation(data_fixture, client):
core_handler = CoreHandler()
valid_password = "thisIsAValidPassword"
invitation = data_fixture.create_group_invitation(email="test0@test.nl")
signer = core_handler.get_group_invitation_signer()
response_failed = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test@test.nl",
"password": valid_password,
"group_invitation_token": "INVALID",
},
format="json",
)
assert response_failed.status_code == HTTP_400_BAD_REQUEST
assert response_failed.json()["error"] == "BAD_TOKEN_SIGNATURE"
response_failed = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test0@test.nl",
"password": valid_password,
"group_invitation_token": f"{signer.dumps(invitation.id)}2",
},
format="json",
)
assert response_failed.status_code == HTTP_400_BAD_REQUEST
assert response_failed.json()["error"] == "BAD_TOKEN_SIGNATURE"
assert User.objects.all().count() == 1
response_failed = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test@test.nl",
"password": valid_password,
"group_invitation_token": signer.dumps(99999),
},
format="json",
)
assert response_failed.status_code == HTTP_404_NOT_FOUND
assert response_failed.json()["error"] == "ERROR_GROUP_INVITATION_DOES_NOT_EXIST"
response_failed = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test@test.nl",
"password": valid_password,
"group_invitation_token": signer.dumps(invitation.id),
},
format="json",
)
assert response_failed.status_code == HTTP_400_BAD_REQUEST
assert response_failed.json()["error"] == "ERROR_GROUP_INVITATION_EMAIL_MISMATCH"
assert User.objects.all().count() == 1
response_failed = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test0@test.nl",
"password": valid_password,
"group_invitation_token": signer.dumps(invitation.id),
},
format="json",
)
assert response_failed.status_code == HTTP_200_OK
assert User.objects.all().count() == 2
assert Group.objects.all().count() == 1
assert Group.objects.all().first().id == invitation.group_id
assert GroupUser.objects.all().count() == 2
assert Database.objects.all().count() == 0
assert Table.objects.all().count() == 0
@pytest.mark.django_db
def test_create_user_with_template(data_fixture, client):
old_templates = settings.APPLICATION_TEMPLATES_DIR
valid_password = "thisIsAValidPassword"
settings.APPLICATION_TEMPLATES_DIR = os.path.join(
settings.BASE_DIR, "../../../tests/templates"
)
template = data_fixture.create_template(slug="example-template")
response = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test0@test.nl",
"password": valid_password,
"template_id": -1,
},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert response_json["detail"]["template_id"][0]["code"] == "does_not_exist"
response = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test0@test.nl",
"password": valid_password,
"template_id": "random",
},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert response_json["detail"]["template_id"][0]["code"] == "incorrect_type"
response = client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test0@test.nl",
"password": valid_password,
"template_id": template.id,
},
format="json",
)
assert response.status_code == HTTP_200_OK
assert Group.objects.all().count() == 2
assert GroupUser.objects.all().count() == 1
# We expect the example template to be installed
assert Database.objects.all().count() == 1
assert Database.objects.all().first().name == "Event marketing"
assert Table.objects.all().count() == 2
settings.APPLICATION_TEMPLATES_DIR = old_templates
@pytest.mark.django_db(transaction=True)
def test_send_reset_password_email(data_fixture, client, mailoutbox):
data_fixture.create_user(email="test@localhost.nl")
response = client.post(
reverse("api:user:send_reset_password_email"), {}, format="json"
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
response = client.post(
reverse("api:user:send_reset_password_email"),
{"email": "unknown@localhost.nl", "base_url": "http://localhost:3000"},
format="json",
)
assert response.status_code == 204
assert len(mailoutbox) == 0
response = client.post(
reverse("api:user:send_reset_password_email"),
{"email": "test@localhost.nl", "base_url": "http://test.nl"},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_HOSTNAME_IS_NOT_ALLOWED"
assert len(mailoutbox) == 0
response = client.post(
reverse("api:user:send_reset_password_email"),
{"email": "test@localhost.nl", "base_url": "http://localhost:3000"},
format="json",
)
assert response.status_code == 204
assert len(mailoutbox) == 1
response = client.post(
reverse("api:user:send_reset_password_email"),
{"email": " teST@locAlhost.nl ", "base_url": "http://localhost:3000"},
format="json",
)
assert response.status_code == 204
assert len(mailoutbox) == 2
email = mailoutbox[0]
assert "test@localhost.nl" in email.to
assert email.body.index("http://localhost:3000")
@pytest.mark.django_db
def test_password_reset(data_fixture, client):
user = data_fixture.create_user(email="test@localhost")
handler = UserHandler()
valid_password = "thisIsAValidPassword"
short_password = "short"
long_password = (
"Bgvmt95en6HGJZ9Xz0F8xysQ6eYgo2Y54YzRPxxv10b5n16F4rZ6YH4ulonocwiFK6970KiAxoYhU"
"LYA3JFDPIQGj5gMZZl25M46sO810Zd3nyBg699a2TDMJdHG7hAAi0YeDnuHuabyBawnb4962OQ1OO"
"f1MxzFyNWG7NR2X6MZQL5G1V61x56lQTXbvK1AG1IPM87bQ3YAtIBtGT2vK3Wd83q3he5ezMtUfzK"
"2ibj0WWhf86DyQB4EHRUJjYcBiI78iEJv5hcu33X2I345YosO66cTBWK45SqJEDudrCOq"
)
signer = handler.get_reset_password_signer()
response = client.post(reverse("api:user:reset_password"), {}, format="json")
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
response = client.post(
reverse("api:user:reset_password"),
{"token": "test", "password": valid_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "BAD_TOKEN_SIGNATURE"
with freeze_time("2020-01-01 12:00"):
token = signer.dumps(user.id)
with freeze_time("2020-01-04 12:00"):
response = client.post(
reverse("api:user:reset_password"),
{"token": token, "password": valid_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "EXPIRED_TOKEN_SIGNATURE"
with freeze_time("2020-01-01 12:00"):
token = signer.dumps(9999)
with freeze_time("2020-01-02 12:00"):
response = client.post(
reverse("api:user:reset_password"),
{"token": token, "password": valid_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_USER_NOT_FOUND"
with freeze_time("2020-01-01 12:00"):
token = signer.dumps(user.id)
with freeze_time("2020-01-02 12:00"):
response = client.post(
reverse("api:user:reset_password"),
{"token": token, "password": valid_password},
format="json",
)
assert response.status_code == 204
user.refresh_from_db()
assert user.check_password(valid_password)
with freeze_time("2020-01-02 12:00"):
token = signer.dumps(user.id)
with freeze_time("2020-01-02 12:00"):
response = client.post(
reverse("api:user:reset_password"),
{"token": token, "password": short_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["password"][0]["code"]
== "password_validation_failed"
)
assert (
response_json["detail"]["password"][0]["error"]
== "This password is too short. It must contain at least 8 characters."
)
user.refresh_from_db()
assert not user.check_password(short_password)
with freeze_time("2020-01-02 12:00"):
response = client.post(
reverse("api:user:reset_password"),
{"token": token, "password": long_password},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["password"][0]["code"]
== "password_validation_failed"
)
assert (
response_json["detail"]["password"][0]["error"]
== "This password is too long. It must not exceed 256 characters."
)
user.refresh_from_db()
assert not user.check_password(long_password)
@pytest.mark.django_db
def test_change_password(data_fixture, client):
valid_old_password = "thisIsAValidPassword"
valid_new_password = "thisIsAValidNewPassword"
short_password = "short"
long_password = (
"Bgvmt95en6HGJZ9Xz0F8xysQ6eYgo2Y54YzRPxxv10b5n16F4rZ6YH4ulonocwiFK6970KiAxoYhU"
"LYA3JFDPIQGj5gMZZl25M46sO810Zd3nyBg699a2TDMJdHG7hAAi0YeDnuHuabyBawnb4962OQ1OO"
"f1MxzFyNWG7NR2X6MZQL5G1V61x56lQTXbvK1AG1IPM87bQ3YAtIBtGT2vK3Wd83q3he5ezMtUfzK"
"2ibj0WWhf86DyQB4EHRUJjYcBiI78iEJv5hcu33X2I345YosO66cTBWK45SqJEDudrCOq"
)
user, token = data_fixture.create_user_and_token(
email="test@localhost", password=valid_old_password
)
response = client.post(
reverse("api:user:change_password"),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
response = client.post(
reverse("api:user:change_password"),
{"old_password": "INCORRECT", "new_password": valid_new_password},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_OLD_PASSWORD"
user.refresh_from_db()
assert user.check_password(valid_old_password)
response = client.post(
reverse("api:user:change_password"),
{"old_password": valid_old_password, "new_password": short_password},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["new_password"][0]["code"]
== "password_validation_failed"
)
assert (
response_json["detail"]["new_password"][0]["error"]
== "This password is too short. It must contain at least 8 characters."
)
user.refresh_from_db()
assert user.check_password(valid_old_password)
response = client.post(
reverse("api:user:change_password"),
{"old_password": valid_old_password, "new_password": long_password},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["new_password"][0]["code"]
== "password_validation_failed"
)
assert (
response_json["detail"]["new_password"][0]["error"]
== "This password is too long. It must not exceed 256 characters."
)
user.refresh_from_db()
assert user.check_password(valid_old_password)
response = client.post(
reverse("api:user:change_password"),
{"old_password": valid_old_password, "new_password": valid_new_password},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 204
user.refresh_from_db()
assert user.check_password(valid_new_password)
@pytest.mark.django_db
def test_dashboard(data_fixture, client):
user, token = data_fixture.create_user_and_token(email="test@localhost")
group_1 = data_fixture.create_group(name="Test1")
group_2 = data_fixture.create_group()
invitation_1 = data_fixture.create_group_invitation(
group=group_1, email="test@localhost"
)
data_fixture.create_group_invitation(group=group_1, email="test2@localhost")
data_fixture.create_group_invitation(group=group_2, email="test3@localhost")
response = client.get(
reverse("api:user:dashboard"), format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
response_json = response.json()
assert len(response_json["group_invitations"]) == 1
assert response_json["group_invitations"][0]["id"] == invitation_1.id
assert response_json["group_invitations"][0]["email"] == invitation_1.email
assert response_json["group_invitations"][0]["invited_by"] == (
invitation_1.invited_by.first_name
)
assert response_json["group_invitations"][0]["group"] == "Test1"
assert response_json["group_invitations"][0]["message"] == invitation_1.message
assert "created_on" in response_json["group_invitations"][0]
@pytest.mark.django_db
def test_additional_user_data(api_client, data_fixture):
class TmpUserDataType(UserDataType):
type = "type"
def get_user_data(self, user, request) -> dict:
return True
plugin_mock = TmpUserDataType()
with patch.dict(user_data_registry.registry, {"tmp": plugin_mock}):
response = api_client.post(
reverse("api:user:index"),
{
"name": "Test1",
"email": "test@test.nl",
"password": "thisIsAValidPassword",
"authenticate": True,
},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["tmp"] is True
response = api_client.post(
reverse("api:user:token_auth"),
{"username": "test@test.nl", "password": "thisIsAValidPassword"},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_201_CREATED
assert response_json["tmp"] is True
response = api_client.post(
reverse("api:user:token_refresh"),
{"token": response_json["token"]},
format="json",
)
response_json = response.json()
assert response.status_code == HTTP_201_CREATED
assert response_json["tmp"] is True
| 35.040876
| 87
| 0.641045
| 2,666
| 24,003
| 5.524381
| 0.084771
| 0.084737
| 0.06233
| 0.05296
| 0.803639
| 0.760796
| 0.729155
| 0.711366
| 0.683732
| 0.616852
| 0
| 0.029775
| 0.227638
| 24,003
| 684
| 88
| 35.092105
| 0.764658
| 0.004166
| 0
| 0.565506
| 0
| 0
| 0.252939
| 0.08741
| 0
| 0
| 0
| 0
| 0.227197
| 1
| 0.016584
| false
| 0.162521
| 0.021559
| 0.001658
| 0.043118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e3a1f3f45077d112795da5ac34b5f3557d6232aa
| 45
|
py
|
Python
|
test.py
|
RusherRG/Hack-it
|
3a14bf4a1dc129d567fa9bc56d77b4f96a0d4b8c
|
[
"MIT"
] | 1
|
2021-04-09T06:45:05.000Z
|
2021-04-09T06:45:05.000Z
|
test.py
|
RusherRG/Hack-it
|
3a14bf4a1dc129d567fa9bc56d77b4f96a0d4b8c
|
[
"MIT"
] | null | null | null |
test.py
|
RusherRG/Hack-it
|
3a14bf4a1dc129d567fa9bc56d77b4f96a0d4b8c
|
[
"MIT"
] | null | null | null |
print(add(5, 3))
def add(x, y):
return x+y
| 9
| 16
| 0.577778
| 11
| 45
| 2.363636
| 0.727273
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.2
| 45
| 4
| 17
| 11.25
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
54191d23ae6c2f8e839496e36509dee873489bcc
| 5,083
|
py
|
Python
|
tests/server/extensions/test_loqusdb_extension_init.py
|
gmc-norr/scout
|
ea8eaaa079c63e4033af6216ec08da4a314f9b5c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/server/extensions/test_loqusdb_extension_init.py
|
gmc-norr/scout
|
ea8eaaa079c63e4033af6216ec08da4a314f9b5c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/server/extensions/test_loqusdb_extension_init.py
|
gmc-norr/scout
|
ea8eaaa079c63e4033af6216ec08da4a314f9b5c
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests for loqusdb extension"""
import subprocess
import pytest
from flask import Flask
from scout.server.extensions.loqus_extension import LoqusDB
def test_init_loqusextension(loqus_exe):
"""Test a init a loqus extension object"""
# GIVEN a loqusdb binary
# WHEN initialising a loqusdb extension
loqus_obj = LoqusDB(loqusdb_binary=loqus_exe)
# THEN assert that the binary is correct
assert loqus_obj.loqusdb_binary == loqus_exe
# THEN assert that the base call is correct
assert loqus_obj.base_call == [loqus_exe]
# THEN assert that the version is 0
assert loqus_obj.version == 0
# THEN assert that there is no config
assert loqus_obj.loqusdb_config is None
def test_init_loqusextension_version(loqus_exe, loqus_version):
"""Test a init a loqus extension object with a specified version"""
# GIVEN a loqusdb binary and a version
# WHEN initialising a loqusdb extension
loqus_obj = LoqusDB(loqusdb_binary=loqus_exe, version=loqus_version)
# THEN assert that the binary is correct
assert loqus_obj.loqusdb_binary == loqus_exe
# THEN assert that the base call is correct
assert loqus_obj.base_call == [loqus_exe]
# THEN assert that the version is correct
assert loqus_obj.version == loqus_version
# THEN assert that there is no config
assert loqus_obj.loqusdb_config is None
def test_init_loqusextension_config(loqus_exe, loqus_config, loqus_version):
"""Test a init a loqus extension object with a specified version"""
# GIVEN a loqusdb binary, a version and a config
# WHEN initialising a loqusdb extension
loqus_obj = LoqusDB(
loqusdb_binary=loqus_exe, loqusdb_config=loqus_config, version=loqus_version
)
# THEN assert that the binary is correct
assert loqus_obj.loqusdb_binary == loqus_exe
# THEN assert that the base call is correct
assert loqus_obj.base_call == [loqus_exe, "--config", loqus_config]
# THEN assert that the version is correct
assert loqus_obj.version == loqus_version
# THEN assert that there is no config
assert loqus_obj.loqusdb_config == loqus_config
def test_init_loqusextension_init_app(loqus_exe, loqus_version):
"""Test a init a loqus extension object with flask app with version"""
# GIVEN a loqusdb binary
configs = {"LOQUSDB_SETTINGS": {"binary_path": loqus_exe, "version": loqus_version}}
# WHEN initialising a loqusdb extension with init app
app = Flask(__name__)
loqus_obj = LoqusDB()
with app.app_context():
app.config = configs
loqus_obj.init_app(app)
# THEN assert that the binary is correct
assert loqus_obj.loqusdb_binary == loqus_exe
# THEN assert that the version is correct
assert loqus_obj.version == loqus_version
# THEN assert that there is no config
assert loqus_obj.loqusdb_config is None
def test_init_loqusextension_init_app_no_version(mocker, loqus_exe, loqus_version):
"""Test a init a loqus extension object with flask app"""
# GIVEN a loqusdb binary
configs = {"LOQUSDB_SETTINGS": {"binary_path": loqus_exe}}
mocker.patch.object(subprocess, "check_output")
subprocess.check_output.return_value = b"loqusdb, version %f" % loqus_version
# WHEN initialising a loqusdb extension with init app
app = Flask(__name__)
loqus_obj = LoqusDB()
with app.app_context():
app.config = configs
loqus_obj.init_app(app)
# THEN assert that the binary is correct
assert loqus_obj.loqusdb_binary == loqus_exe
assert loqus_obj.version == loqus_version
# THEN assert that there is no config
assert loqus_obj.loqusdb_config is None
def test_init_loqusextension_init_app_wrong_version(loqus_exe):
"""Test a init a loqus extension object with flask app"""
# GIVEN a loqusdb binary
configs = {"LOQUSDB_SETTINGS": {"binary_path": loqus_exe, "version": 1.0}}
# WHEN initialising a loqusdb extension with init app
app = Flask(__name__)
loqus_obj = LoqusDB()
with pytest.raises(SyntaxError):
with app.app_context():
app.config = configs
loqus_obj.init_app(app)
def test_init_loqusextension_init_app_with_config(loqus_exe, loqus_config):
"""Test a init a loqus extension object with flask app with version and config"""
# GIVEN a loqusdb binary
version = 2.5
configs = {
"LOQUSDB_SETTINGS": {
"binary_path": loqus_exe,
"version": version,
"config_path": loqus_config,
}
}
# WHEN initialising a loqusdb extension with init app
app = Flask(__name__)
loqus_obj = LoqusDB()
with app.app_context():
app.config = configs
loqus_obj.init_app(app)
# THEN assert that the binary is correct
assert loqus_obj.loqusdb_binary == loqus_exe
# THEN assert that the version is correct
assert loqus_obj.version == version
# THEN assert that the config is correct
assert loqus_obj.loqusdb_config == loqus_config
| 39.1
| 88
| 0.70962
| 701
| 5,083
| 4.917261
| 0.088445
| 0.074267
| 0.085292
| 0.073977
| 0.86597
| 0.844502
| 0.817813
| 0.803597
| 0.789962
| 0.776617
| 0
| 0.001519
| 0.223097
| 5,083
| 129
| 89
| 39.403101
| 0.87136
| 0.339957
| 0
| 0.535211
| 0
| 0
| 0.054557
| 0
| 0
| 0
| 0
| 0
| 0.295775
| 1
| 0.098592
| false
| 0
| 0.056338
| 0
| 0.15493
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5868699cc6110ca103ca272a66e68d03e48fa922
| 954
|
py
|
Python
|
pycurlutil.py
|
MattLud/FacebookFilterTracker
|
3e28d1401b18b2296a96c2e970958f5c75629a14
|
[
"Apache-2.0"
] | 3
|
2017-12-04T19:39:59.000Z
|
2019-01-10T06:36:37.000Z
|
pycurlutil.py
|
MattLud/FacebookFilterTracker
|
3e28d1401b18b2296a96c2e970958f5c75629a14
|
[
"Apache-2.0"
] | null | null | null |
pycurlutil.py
|
MattLud/FacebookFilterTracker
|
3e28d1401b18b2296a96c2e970958f5c75629a14
|
[
"Apache-2.0"
] | null | null | null |
import pycurl
import urllib.parse
from collections import defaultdict
from io import BytesIO
import json
def pycurlgetURL(url):
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
body = buffer.getvalue()
return json.loads(body.decode('iso-8859-1'))
def pycurlget(url, params):
buffer = BytesIO()
c = pycurl.Curl()
pairs = urllib.parse.urlencode(params)
c.setopt(c.URL, url+'?'+pairs)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
body = buffer.getvalue()
return json.loads(body.decode('iso-8859-1'))
def pycurlpost(url, params):
buffer = BytesIO()
c = pycurl.Curl()
pairs = urllib.parse.urlencode(params)
c.setopt(c.URL, url)
c.setopt(c.POSTFIELDS, pairs)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
body = buffer.getvalue()
return json.loads(body.decode('iso-8859-1'))
| 25.105263
| 48
| 0.648847
| 132
| 954
| 4.689394
| 0.257576
| 0.07916
| 0.090469
| 0.096931
| 0.778675
| 0.739903
| 0.739903
| 0.739903
| 0.704362
| 0.704362
| 0
| 0.019685
| 0.201258
| 954
| 38
| 49
| 25.105263
| 0.792651
| 0
| 0
| 0.714286
| 0
| 0
| 0.032461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.142857
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54536adc105da17f7b301e5af756ef81eb01d793
| 72
|
py
|
Python
|
futures_then/__init__.py
|
dvdotsenko/python-future-then
|
77cfd26bde5cc367226e57eed75853afb85277a7
|
[
"MIT"
] | 19
|
2016-02-10T07:09:11.000Z
|
2020-12-10T18:20:07.000Z
|
futures_then/__init__.py
|
dvdotsenko/python-future-then
|
77cfd26bde5cc367226e57eed75853afb85277a7
|
[
"MIT"
] | 1
|
2020-04-05T08:44:47.000Z
|
2020-04-05T08:44:47.000Z
|
futures_then/__init__.py
|
dvdotsenko/python-future-then
|
77cfd26bde5cc367226e57eed75853afb85277a7
|
[
"MIT"
] | 1
|
2018-07-25T20:35:40.000Z
|
2018-07-25T20:35:40.000Z
|
from .futures_then import ThenableFuture, CircularFuturesChainException
| 36
| 71
| 0.902778
| 6
| 72
| 10.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069444
| 72
| 1
| 72
| 72
| 0.955224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
545e03f12e75de2482a261f3b9e881782b8f0ab7
| 20
|
py
|
Python
|
tests/assets/dependenciespackage/dependenciespackage/subpackage/.hidden/hidden.py
|
SimonBiggs/layer_linter
|
9eb518b74118e4a2d8079e2f32ecc12612ca9e86
|
[
"BSD-3-Clause"
] | 63
|
2018-06-21T10:39:54.000Z
|
2021-06-04T14:28:44.000Z
|
tests/assets/dependenciespackage/dependenciespackage/subpackage/.hidden/hidden.py
|
SimonBiggs/layer_linter
|
9eb518b74118e4a2d8079e2f32ecc12612ca9e86
|
[
"BSD-3-Clause"
] | 86
|
2018-06-20T13:30:30.000Z
|
2019-06-04T12:47:28.000Z
|
tests/assets/dependenciespackage/dependenciespackage/subpackage/.hidden/hidden.py
|
SimonBiggs/layer_linter
|
9eb518b74118e4a2d8079e2f32ecc12612ca9e86
|
[
"BSD-3-Clause"
] | 4
|
2018-08-14T08:49:55.000Z
|
2019-02-16T09:24:47.000Z
|
from . import three
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5471f45dc1bc4eb25dd3bcbc1cbaf956490b5e4e
| 144
|
py
|
Python
|
PythonBackend/calc_model/__init__.py
|
goo-goo-goo-joob/CreditRisks
|
c874941f3787a0c73063883a019a61672e7bef2f
|
[
"Apache-2.0"
] | 1
|
2020-09-19T12:32:45.000Z
|
2020-09-19T12:32:45.000Z
|
PythonBackend/calc_model/__init__.py
|
goo-goo-goo-joob/CreditRisks
|
c874941f3787a0c73063883a019a61672e7bef2f
|
[
"Apache-2.0"
] | null | null | null |
PythonBackend/calc_model/__init__.py
|
goo-goo-goo-joob/CreditRisks
|
c874941f3787a0c73063883a019a61672e7bef2f
|
[
"Apache-2.0"
] | null | null | null |
from .abstract_model import AbstractModel
from .bank_model import BankModel
from .cb_model import CatBoostModel
from .sgd_model import SGDModel
| 28.8
| 41
| 0.861111
| 20
| 144
| 6
| 0.55
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 144
| 4
| 42
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54ba823e4ecb19a5d30399f968df953816f1fb53
| 35
|
tac
|
Python
|
src/main/resources/examples2/taf-A5-2.tac
|
saeidrastak/IWXXMConverter
|
f2c881ce1ec0269791148bd3ed8de3de01ba31dd
|
[
"BSD-3-Clause"
] | 5
|
2016-10-26T06:40:29.000Z
|
2021-06-22T19:21:25.000Z
|
src/main/resources/examples2/taf-A5-2.tac
|
saeidrastak/IWXXMConverter
|
f2c881ce1ec0269791148bd3ed8de3de01ba31dd
|
[
"BSD-3-Clause"
] | null | null | null |
src/main/resources/examples2/taf-A5-2.tac
|
saeidrastak/IWXXMConverter
|
f2c881ce1ec0269791148bd3ed8de3de01ba31dd
|
[
"BSD-3-Clause"
] | 1
|
2020-09-03T14:06:00.000Z
|
2020-09-03T14:06:00.000Z
|
TAF AMD YUDO 161500Z 1606/1624 CNL
| 17.5
| 34
| 0.8
| 7
| 35
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.482759
| 0.171429
| 35
| 1
| 35
| 35
| 0.482759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54c511eaced277fb6dfde11e6a06e59b79926c6c
| 489
|
py
|
Python
|
Python_Img_Humor/src/data/__init__.py
|
limorigu/ComplexCauses
|
e047bea494329e4c4ca0f124c1a44daf900055df
|
[
"CC0-1.0"
] | 4
|
2021-06-11T15:03:05.000Z
|
2022-03-28T10:41:11.000Z
|
Python_Img_Humor/src/data/__init__.py
|
limorigu/ComplexCauses
|
e047bea494329e4c4ca0f124c1a44daf900055df
|
[
"CC0-1.0"
] | null | null | null |
Python_Img_Humor/src/data/__init__.py
|
limorigu/ComplexCauses
|
e047bea494329e4c4ca0f124c1a44daf900055df
|
[
"CC0-1.0"
] | null | null | null |
from data.PertImgSim import get_img_sim_loaders_by_cov, \
ImgSimPert_data_by_cov, get_full_vector_img_sim
from data.Humicroedit import get_humicroedit_loaders_by_cov, \
Humicroedit_data_by_cov, get_full_vector_humicroedit
from data.data_utils import DataIter
__all__ = ['get_img_sim_loaders_by_cov', 'ImgSimPert_data_by_cov', 'get_full_vector_img_sim',
'get_humicroedit_loaders_by_cov', 'Humicroedit_data_by_cov',
'get_full_vector_humicroedit', 'DataIter']
| 44.454545
| 93
| 0.813906
| 72
| 489
| 4.847222
| 0.236111
| 0.114613
| 0.137536
| 0.137536
| 0.739255
| 0.739255
| 0.739255
| 0.739255
| 0.739255
| 0.739255
| 0
| 0
| 0.120654
| 489
| 10
| 94
| 48.9
| 0.811628
| 0
| 0
| 0
| 0
| 0
| 0.32582
| 0.309426
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
54c8235fb8a45ea16b9962c644b5fa38adb247b9
| 104
|
py
|
Python
|
tdsbconnects/__init__.py
|
tylertian123/pytdsbconnects
|
b5f820e125c37150c3f6700fdf0a0d5998f71c52
|
[
"MIT"
] | 3
|
2020-09-17T21:57:25.000Z
|
2020-11-30T06:19:45.000Z
|
tdsbconnects/__init__.py
|
tylertian123/pytdsbconnects
|
b5f820e125c37150c3f6700fdf0a0d5998f71c52
|
[
"MIT"
] | null | null | null |
tdsbconnects/__init__.py
|
tylertian123/pytdsbconnects
|
b5f820e125c37150c3f6700fdf0a0d5998f71c52
|
[
"MIT"
] | null | null | null |
from .tdsbconnects import *
from .objects import *
from .util import *
from .version import __version__
| 20.8
| 32
| 0.778846
| 13
| 104
| 5.923077
| 0.461538
| 0.38961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 104
| 4
| 33
| 26
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49dcb55e57c100f835cd8b5bbdba1b91dbf69186
| 18,654
|
py
|
Python
|
fonts/vector/symbol.py
|
szczys/st7789_mpy
|
bc854ec453d7644ce1773f7ed4d41504f37d376b
|
[
"MIT"
] | 153
|
2020-02-02T11:03:14.000Z
|
2022-03-30T05:47:07.000Z
|
fonts/vector/symbol.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 58
|
2020-04-11T23:23:02.000Z
|
2022-03-26T20:45:23.000Z
|
fonts/vector/symbol.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 50
|
2020-02-02T11:05:23.000Z
|
2022-03-22T15:24:42.000Z
|
# Nominal glyph cell size of this vector (stroke) font, in pixels.
WIDTH = 87
HEIGHT = 87
# Character range covered by the font: ASCII 0x20 (space) .. 0x7f.
FIRST = 0x20
LAST = 0x7f
_font =\
b'\x00\x4a\x5a\x02\x44\x60\x44\x52\x60\x52\x02\x44\x60\x44\x60'\
b'\x60\x44\x02\x52\x52\x52\x3e\x52\x66\x02\x44\x60\x44\x44\x60'\
b'\x60\x02\x44\x60\x44\x52\x60\x52\x02\x46\x5e\x46\x59\x5e\x4b'\
b'\x02\x4b\x59\x4b\x5e\x59\x46\x02\x52\x52\x52\x44\x52\x60\x02'\
b'\x4b\x59\x4b\x46\x59\x5e\x02\x46\x5e\x46\x4b\x5e\x59\x02\x4b'\
b'\x59\x4b\x52\x59\x52\x02\x4d\x57\x4d\x57\x57\x4d\x02\x52\x52'\
b'\x52\x4b\x52\x59\x02\x4d\x57\x4d\x4d\x57\x57\x07\x47\x52\x52'\
b'\x47\x50\x47\x4d\x48\x4a\x4a\x48\x4d\x47\x50\x47\x52\x07\x47'\
b'\x52\x47\x52\x47\x54\x48\x57\x4a\x5a\x4d\x5c\x50\x5d\x52\x5d'\
b'\x07\x52\x5d\x52\x5d\x54\x5d\x57\x5c\x5a\x5a\x5c\x57\x5d\x54'\
b'\x5d\x52\x07\x52\x5d\x5d\x52\x5d\x50\x5c\x4d\x5a\x4a\x57\x48'\
b'\x54\x47\x52\x47\x08\x44\x60\x44\x4f\x47\x51\x4b\x53\x50\x54'\
b'\x54\x54\x59\x53\x5d\x51\x60\x4f\x08\x50\x55\x55\x44\x53\x47'\
b'\x51\x4b\x50\x50\x50\x54\x51\x59\x53\x5d\x55\x60\x08\x4f\x54'\
b'\x4f\x44\x51\x47\x53\x4b\x54\x50\x54\x54\x53\x59\x51\x5d\x4f'\
b'\x60\x08\x44\x60\x44\x55\x47\x53\x4b\x51\x50\x50\x54\x50\x59'\
b'\x51\x5d\x53\x60\x55\x04\x4b\x59\x52\x4a\x59\x4e\x4b\x56\x52'\
b'\x5a\x04\x4a\x5a\x4a\x52\x4e\x4b\x56\x59\x5a\x52\x04\x4b\x59'\
b'\x4b\x56\x4b\x4e\x59\x56\x59\x4e\x04\x4a\x5a\x4c\x58\x4a\x50'\
b'\x5a\x54\x58\x4c\x16\x4a\x5a\x4a\x5d\x4c\x5d\x4f\x5c\x51\x5b'\
b'\x54\x58\x55\x56\x56\x53\x56\x4f\x55\x4c\x54\x4a\x53\x49\x51'\
b'\x49\x50\x4a\x4f\x4c\x4e\x4f\x4e\x53\x4f\x56\x50\x58\x53\x5b'\
b'\x55\x5c\x58\x5d\x5a\x5d\x16\x49\x5d\x5d\x5a\x5d\x58\x5c\x55'\
b'\x5b\x53\x58\x50\x56\x4f\x53\x4e\x4f\x4e\x4c\x4f\x4a\x50\x49'\
b'\x51\x49\x53\x4a\x54\x4c\x55\x4f\x56\x53\x56\x56\x55\x58\x54'\
b'\x5b\x51\x5c\x4f\x5d\x4c\x5d\x4a\x16\x4a\x5a\x5a\x47\x58\x47'\
b'\x55\x48\x53\x49\x50\x4c\x4f\x4e\x4e\x51\x4e\x55\x4f\x58\x50'\
b'\x5a\x51\x5b\x53\x5b\x54\x5a\x55\x58\x56\x55\x56\x51\x55\x4e'\
b'\x54\x4c\x51\x49\x4f\x48\x4c\x47\x4a\x47\x16\x47\x5b\x47\x4a'\
b'\x47\x4c\x48\x4f\x49\x51\x4c\x54\x4e\x55\x51\x56\x55\x56\x58'\
b'\x55\x5a\x54\x5b\x53\x5b\x51\x5a\x50\x58\x4f\x55\x4e\x51\x4e'\
b'\x4e\x4f\x4c\x50\x49\x53\x48\x55\x47\x58\x47\x5a\x14\x45\x5b'\
b'\x45\x50\x46\x52\x48\x54\x4a\x55\x4d\x56\x51\x56\x55\x55\x58'\
b'\x53\x5a\x50\x5b\x4e\x5a\x4c\x57\x4c\x53\x4d\x51\x4e\x4e\x50'\
b'\x4c\x53\x4b\x56\x4b\x59\x4c\x5c\x4d\x5e\x12\x45\x59\x45\x54'\
b'\x48\x56\x4b\x57\x50\x57\x53\x56\x56\x54\x58\x51\x59\x4e\x59'\
b'\x4c\x58\x4b\x56\x4b\x53\x4c\x50\x4e\x4e\x51\x4d\x54\x4d\x59'\
b'\x4e\x5c\x50\x5f\x19\x4f\x55\x51\x4f\x4f\x51\x4f\x53\x51\x55'\
b'\x53\x55\x55\x53\x55\x51\x53\x4f\x51\x4f\x20\x52\x51\x50\x50'\
b'\x51\x50\x53\x51\x54\x53\x54\x54\x53\x54\x51\x53\x50\x51\x50'\
b'\x20\x52\x52\x51\x51\x52\x52\x53\x53\x52\x52\x51\x0a\x52\x57'\
b'\x52\x4d\x53\x4d\x55\x4e\x56\x4f\x57\x51\x57\x53\x56\x55\x55'\
b'\x56\x53\x57\x52\x57\x08\x44\x60\x44\x52\x4a\x52\x20\x52\x4f'\
b'\x52\x55\x52\x20\x52\x5a\x52\x60\x52\x04\x44\x60\x44\x55\x44'\
b'\x4f\x60\x4f\x60\x55\x05\x4a\x5a\x52\x44\x4a\x52\x20\x52\x52'\
b'\x44\x5a\x52\x08\x44\x60\x44\x52\x60\x52\x20\x52\x4a\x59\x5a'\
b'\x59\x20\x52\x50\x60\x54\x60\x08\x44\x60\x44\x52\x60\x52\x20'\
b'\x52\x44\x52\x52\x62\x20\x52\x60\x52\x52\x62\x11\x4b\x59\x51'\
b'\x4b\x4e\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59'\
b'\x53\x59\x56\x58\x58\x56\x59\x53\x59\x51\x58\x4e\x56\x4c\x53'\
b'\x4b\x51\x4b\x05\x4c\x58\x4c\x4c\x4c\x58\x58\x58\x58\x4c\x4c'\
b'\x4c\x04\x4b\x59\x52\x4a\x4b\x56\x59\x56\x52\x4a\x05\x4c\x58'\
b'\x52\x48\x4c\x52\x52\x5c\x58\x52\x52\x48\x0b\x4a\x5a\x52\x49'\
b'\x50\x4f\x4a\x4f\x4f\x53\x4d\x59\x52\x55\x57\x59\x55\x53\x5a'\
b'\x4f\x54\x4f\x52\x49\x05\x4b\x59\x52\x4b\x52\x59\x20\x52\x4b'\
b'\x52\x59\x52\x05\x4d\x57\x4d\x4d\x57\x57\x20\x52\x57\x4d\x4d'\
b'\x57\x08\x4d\x57\x52\x4c\x52\x58\x20\x52\x4d\x4f\x57\x55\x20'\
b'\x52\x57\x4f\x4d\x55\x22\x4e\x56\x51\x4e\x4f\x4f\x4e\x51\x4e'\
b'\x53\x4f\x55\x51\x56\x53\x56\x55\x55\x56\x53\x56\x51\x55\x4f'\
b'\x53\x4e\x51\x4e\x20\x52\x4f\x51\x4f\x53\x20\x52\x50\x50\x50'\
b'\x54\x20\x52\x51\x4f\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52'\
b'\x53\x4f\x53\x55\x20\x52\x54\x50\x54\x54\x20\x52\x55\x51\x55'\
b'\x53\x1a\x4e\x56\x4e\x4e\x4e\x56\x56\x56\x56\x4e\x4e\x4e\x20'\
b'\x52\x4f\x4f\x4f\x55\x20\x52\x50\x4f\x50\x55\x20\x52\x51\x4f'\
b'\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53\x55\x20'\
b'\x52\x54\x4f\x54\x55\x20\x52\x55\x4f\x55\x55\x10\x4d\x57\x52'\
b'\x4c\x4d\x55\x57\x55\x52\x4c\x20\x52\x52\x4f\x4f\x54\x20\x52'\
b'\x52\x4f\x55\x54\x20\x52\x52\x52\x51\x54\x20\x52\x52\x52\x53'\
b'\x54\x10\x4c\x55\x4c\x52\x55\x57\x55\x4d\x4c\x52\x20\x52\x4f'\
b'\x52\x54\x55\x20\x52\x4f\x52\x54\x4f\x20\x52\x52\x52\x54\x53'\
b'\x20\x52\x52\x52\x54\x51\x10\x4d\x57\x52\x58\x57\x4f\x4d\x4f'\
b'\x52\x58\x20\x52\x52\x55\x55\x50\x20\x52\x52\x55\x4f\x50\x20'\
b'\x52\x52\x52\x53\x50\x20\x52\x52\x52\x51\x50\x10\x4f\x58\x58'\
b'\x52\x4f\x4d\x4f\x57\x58\x52\x20\x52\x55\x52\x50\x4f\x20\x52'\
b'\x55\x52\x50\x55\x20\x52\x52\x52\x50\x51\x20\x52\x52\x52\x50'\
b'\x53\x0a\x52\x59\x52\x4b\x52\x59\x20\x52\x52\x4b\x59\x4e\x52'\
b'\x51\x20\x52\x53\x4d\x56\x4e\x53\x4f\x14\x49\x5b\x52\x47\x52'\
b'\x56\x20\x52\x4d\x4a\x57\x50\x20\x52\x57\x4a\x4d\x50\x20\x52'\
b'\x49\x56\x4c\x5c\x20\x52\x5b\x56\x58\x5c\x20\x52\x49\x56\x5b'\
b'\x56\x20\x52\x4c\x5c\x58\x5c\x0c\x4d\x57\x52\x4c\x52\x58\x20'\
b'\x52\x4f\x4f\x55\x4f\x20\x52\x4d\x55\x4f\x57\x51\x58\x53\x58'\
b'\x55\x57\x57\x55\x0a\x4c\x58\x52\x4c\x52\x58\x20\x52\x4c\x51'\
b'\x4d\x4f\x57\x4f\x58\x51\x20\x52\x50\x57\x54\x57\x0d\x4b\x59'\
b'\x4d\x4e\x57\x58\x20\x52\x57\x4e\x4d\x58\x20\x52\x4f\x4c\x4c'\
b'\x4f\x4b\x51\x20\x52\x55\x4c\x58\x4f\x59\x51\x11\x49\x5b\x4e'\
b'\x49\x49\x5b\x20\x52\x56\x49\x5b\x5b\x20\x52\x4d\x4d\x5b\x5b'\
b'\x20\x52\x57\x4d\x49\x5b\x20\x52\x4e\x49\x56\x49\x20\x52\x4d'\
b'\x4d\x57\x4d\x02\x4b\x59\x4b\x46\x59\x5e\x0a\x47\x5b\x4d\x4a'\
b'\x53\x56\x20\x52\x4b\x50\x53\x4c\x20\x52\x47\x5c\x5b\x5c\x5b'\
b'\x52\x47\x5c\x0d\x4c\x58\x50\x4c\x50\x50\x4c\x50\x4c\x54\x50'\
b'\x54\x50\x58\x54\x58\x54\x54\x58\x54\x58\x50\x54\x50\x54\x4c'\
b'\x50\x4c\x1f\x4b\x59\x59\x50\x58\x4e\x56\x4c\x53\x4b\x51\x4b'\
b'\x4e\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59\x53'\
b'\x59\x56\x58\x58\x56\x59\x54\x20\x52\x59\x50\x57\x4e\x55\x4d'\
b'\x53\x4d\x51\x4e\x50\x4f\x4f\x51\x4f\x53\x50\x55\x51\x56\x53'\
b'\x57\x55\x57\x57\x56\x59\x54\x09\x4b\x59\x52\x4a\x4b\x56\x59'\
b'\x56\x52\x4a\x20\x52\x52\x5a\x59\x4e\x4b\x4e\x52\x5a\x21\x47'\
b'\x5d\x50\x49\x50\x47\x51\x46\x53\x46\x54\x47\x54\x49\x20\x52'\
b'\x47\x5a\x48\x58\x4a\x56\x4b\x54\x4c\x50\x4c\x4b\x4d\x4a\x4f'\
b'\x49\x55\x49\x57\x4a\x58\x4b\x58\x50\x59\x54\x5a\x56\x5c\x58'\
b'\x5d\x5a\x20\x52\x47\x5a\x5d\x5a\x20\x52\x51\x5a\x50\x5b\x51'\
b'\x5c\x53\x5c\x54\x5b\x53\x5a\x3f\x4a\x5a\x52\x4d\x52\x53\x20'\
b'\x52\x52\x53\x51\x5c\x20\x52\x52\x53\x53\x5c\x20\x52\x51\x5c'\
b'\x53\x5c\x20\x52\x52\x4d\x51\x4a\x50\x48\x4e\x47\x20\x52\x51'\
b'\x4a\x4e\x47\x20\x52\x52\x4d\x53\x4a\x54\x48\x56\x47\x20\x52'\
b'\x53\x4a\x56\x47\x20\x52\x52\x4d\x4e\x4b\x4c\x4b\x4a\x4d\x20'\
b'\x52\x50\x4c\x4c\x4c\x4a\x4d\x20\x52\x52\x4d\x56\x4b\x58\x4b'\
b'\x5a\x4d\x20\x52\x54\x4c\x58\x4c\x5a\x4d\x20\x52\x52\x4d\x50'\
b'\x4e\x4f\x4f\x4f\x52\x20\x52\x52\x4d\x50\x4f\x4f\x52\x20\x52'\
b'\x52\x4d\x54\x4e\x55\x4f\x55\x52\x20\x52\x52\x4d\x54\x4f\x55'\
b'\x52\x5d\x4a\x5a\x52\x49\x52\x4b\x20\x52\x52\x4e\x52\x50\x20'\
b'\x52\x52\x53\x52\x55\x20\x52\x52\x59\x51\x5c\x20\x52\x52\x59'\
b'\x53\x5c\x20\x52\x51\x5c\x53\x5c\x20\x52\x52\x47\x51\x49\x50'\
b'\x4a\x20\x52\x52\x47\x53\x49\x54\x4a\x20\x52\x50\x4a\x52\x49'\
b'\x54\x4a\x20\x52\x52\x4b\x50\x4e\x4e\x4f\x4d\x4e\x20\x52\x52'\
b'\x4b\x54\x4e\x56\x4f\x57\x4e\x20\x52\x4e\x4f\x50\x4f\x52\x4e'\
b'\x54\x4f\x56\x4f\x20\x52\x52\x50\x50\x53\x4e\x54\x4c\x54\x4b'\
b'\x52\x4b\x53\x4c\x54\x20\x52\x52\x50\x54\x53\x56\x54\x58\x54'\
b'\x59\x52\x59\x53\x58\x54\x20\x52\x4e\x54\x50\x54\x52\x53\x54'\
b'\x54\x56\x54\x20\x52\x52\x55\x50\x58\x4f\x59\x4d\x5a\x4c\x5a'\
b'\x4b\x59\x4a\x57\x4a\x59\x4c\x5a\x20\x52\x52\x55\x54\x58\x55'\
b'\x59\x57\x5a\x58\x5a\x59\x59\x5a\x57\x5a\x59\x58\x5a\x20\x52'\
b'\x4d\x5a\x4f\x5a\x52\x59\x55\x5a\x57\x5a\x27\x4a\x5a\x52\x59'\
b'\x51\x5c\x20\x52\x52\x59\x53\x5c\x20\x52\x51\x5c\x53\x5c\x20'\
b'\x52\x52\x59\x55\x5a\x58\x5a\x5a\x58\x5a\x55\x59\x54\x57\x54'\
b'\x59\x52\x5a\x4f\x59\x4d\x57\x4c\x55\x4d\x56\x4a\x55\x48\x53'\
b'\x47\x51\x47\x4f\x48\x4e\x4a\x4f\x4d\x4d\x4c\x4b\x4d\x4a\x4f'\
b'\x4b\x52\x4d\x54\x4b\x54\x4a\x55\x4a\x58\x4c\x5a\x4f\x5a\x52'\
b'\x59\x1f\x4a\x5a\x52\x59\x51\x5c\x20\x52\x52\x59\x53\x5c\x20'\
b'\x52\x51\x5c\x53\x5c\x20\x52\x52\x59\x56\x58\x56\x56\x58\x55'\
b'\x58\x52\x5a\x51\x5a\x4c\x59\x49\x58\x48\x56\x48\x54\x47\x50'\
b'\x47\x4e\x48\x4c\x48\x4b\x49\x4a\x4c\x4a\x51\x4c\x52\x4c\x55'\
b'\x4e\x56\x4e\x58\x52\x59\x0e\x49\x5b\x49\x50\x4b\x52\x20\x52'\
b'\x4c\x4b\x4e\x50\x20\x52\x52\x47\x52\x4f\x20\x52\x58\x4b\x56'\
b'\x50\x20\x52\x5b\x50\x59\x52\x1b\x47\x5d\x49\x49\x4a\x4b\x4b'\
b'\x4f\x4b\x55\x4a\x59\x49\x5b\x20\x52\x5b\x49\x5a\x4b\x59\x4f'\
b'\x59\x55\x5a\x59\x5b\x5b\x20\x52\x49\x49\x4b\x4a\x4f\x4b\x55'\
b'\x4b\x59\x4a\x5b\x49\x20\x52\x49\x5b\x4b\x5a\x4f\x59\x55\x59'\
b'\x59\x5a\x5b\x5b\x36\x46\x5e\x52\x52\x52\x5b\x51\x5c\x20\x52'\
b'\x52\x56\x51\x5c\x20\x52\x52\x49\x51\x48\x4f\x48\x4e\x49\x4e'\
b'\x4b\x4f\x4e\x52\x52\x20\x52\x52\x49\x53\x48\x55\x48\x56\x49'\
b'\x56\x4b\x55\x4e\x52\x52\x20\x52\x52\x52\x4e\x4f\x4c\x4e\x4a'\
b'\x4e\x49\x4f\x49\x51\x4a\x52\x20\x52\x52\x52\x56\x4f\x58\x4e'\
b'\x5a\x4e\x5b\x4f\x5b\x51\x5a\x52\x20\x52\x52\x52\x4e\x55\x4c'\
b'\x56\x4a\x56\x49\x55\x49\x53\x4a\x52\x20\x52\x52\x52\x56\x55'\
b'\x58\x56\x5a\x56\x5b\x55\x5b\x53\x5a\x52\x2d\x4a\x5a\x55\x49'\
b'\x54\x4a\x55\x4b\x56\x4a\x56\x49\x55\x47\x53\x46\x51\x46\x4f'\
b'\x47\x4e\x49\x4e\x4b\x4f\x4d\x51\x4f\x56\x52\x20\x52\x4f\x4d'\
b'\x54\x50\x56\x52\x57\x54\x57\x56\x56\x58\x54\x5a\x20\x52\x50'\
b'\x4e\x4e\x50\x4d\x52\x4d\x54\x4e\x56\x50\x58\x55\x5b\x20\x52'\
b'\x4e\x56\x53\x59\x55\x5b\x56\x5d\x56\x5f\x55\x61\x53\x62\x51'\
b'\x62\x4f\x61\x4e\x5f\x4e\x5e\x4f\x5d\x50\x5e\x4f\x5f\x1d\x4a'\
b'\x5a\x52\x46\x51\x48\x52\x4a\x53\x48\x52\x46\x20\x52\x52\x46'\
b'\x52\x62\x20\x52\x52\x51\x51\x54\x52\x62\x53\x54\x52\x51\x20'\
b'\x52\x4c\x4d\x4e\x4e\x50\x4d\x4e\x4c\x4c\x4d\x20\x52\x4c\x4d'\
b'\x58\x4d\x20\x52\x54\x4d\x56\x4e\x58\x4d\x56\x4c\x54\x4d\x37'\
b'\x4a\x5a\x52\x46\x51\x48\x52\x4a\x53\x48\x52\x46\x20\x52\x52'\
b'\x46\x52\x54\x20\x52\x52\x50\x51\x52\x53\x56\x52\x58\x51\x56'\
b'\x53\x52\x52\x50\x20\x52\x52\x54\x52\x62\x20\x52\x52\x5e\x51'\
b'\x60\x52\x62\x53\x60\x52\x5e\x20\x52\x4c\x4d\x4e\x4e\x50\x4d'\
b'\x4e\x4c\x4c\x4d\x20\x52\x4c\x4d\x58\x4d\x20\x52\x54\x4d\x56'\
b'\x4e\x58\x4d\x56\x4c\x54\x4d\x20\x52\x4c\x5b\x4e\x5c\x50\x5b'\
b'\x4e\x5a\x4c\x5b\x20\x52\x4c\x5b\x58\x5b\x20\x52\x54\x5b\x56'\
b'\x5c\x58\x5b\x56\x5a\x54\x5b\x11\x45\x5f\x52\x49\x51\x4a\x52'\
b'\x4b\x53\x4a\x52\x49\x20\x52\x49\x59\x48\x5a\x49\x5b\x4a\x5a'\
b'\x49\x59\x20\x52\x5b\x59\x5a\x5a\x5b\x5b\x5c\x5a\x5b\x59\x20'\
b'\x46\x5e\x52\x48\x4e\x4c\x4b\x50\x4a\x53\x4a\x55\x4b\x57\x4d'\
b'\x58\x4f\x58\x51\x57\x52\x55\x20\x52\x52\x48\x56\x4c\x59\x50'\
b'\x5a\x53\x5a\x55\x59\x57\x57\x58\x55\x58\x53\x57\x52\x55\x20'\
b'\x52\x52\x55\x51\x59\x50\x5c\x20\x52\x52\x55\x53\x59\x54\x5c'\
b'\x20\x52\x50\x5c\x54\x5c\x19\x46\x5e\x52\x4e\x51\x4b\x50\x49'\
b'\x4e\x48\x4d\x48\x4b\x49\x4a\x4b\x4a\x4f\x4b\x52\x4c\x54\x4e'\
b'\x57\x52\x5c\x20\x52\x52\x4e\x53\x4b\x54\x49\x56\x48\x57\x48'\
b'\x59\x49\x5a\x4b\x5a\x4f\x59\x52\x58\x54\x56\x57\x52\x5c\x13'\
b'\x46\x5e\x52\x47\x50\x4a\x4c\x4f\x49\x52\x20\x52\x52\x47\x54'\
b'\x4a\x58\x4f\x5b\x52\x20\x52\x49\x52\x4c\x55\x50\x5a\x52\x5d'\
b'\x20\x52\x5b\x52\x58\x55\x54\x5a\x52\x5d\x2f\x46\x5e\x52\x54'\
b'\x54\x57\x56\x58\x58\x58\x5a\x57\x5b\x55\x5b\x53\x5a\x51\x58'\
b'\x50\x56\x50\x53\x51\x20\x52\x53\x51\x55\x4f\x56\x4d\x56\x4b'\
b'\x55\x49\x53\x48\x51\x48\x4f\x49\x4e\x4b\x4e\x4d\x4f\x4f\x51'\
b'\x51\x20\x52\x51\x51\x4e\x50\x4c\x50\x4a\x51\x49\x53\x49\x55'\
b'\x4a\x57\x4c\x58\x4e\x58\x50\x57\x52\x54\x20\x52\x52\x54\x51'\
b'\x59\x50\x5c\x20\x52\x52\x54\x53\x59\x54\x5c\x20\x52\x50\x5c'\
b'\x54\x5c\x2f\x49\x5b\x56\x2b\x53\x2d\x51\x2f\x50\x31\x4f\x34'\
b'\x4f\x38\x50\x3c\x54\x44\x55\x47\x55\x4a\x54\x4d\x52\x50\x20'\
b'\x52\x53\x2d\x51\x30\x50\x34\x50\x38\x51\x3b\x55\x43\x56\x47'\
b'\x56\x4a\x55\x4d\x52\x50\x4e\x52\x52\x54\x55\x57\x56\x5a\x56'\
b'\x5d\x55\x61\x51\x69\x50\x6c\x50\x70\x51\x74\x53\x77\x20\x52'\
b'\x52\x54\x54\x57\x55\x5a\x55\x5d\x54\x60\x50\x68\x4f\x6c\x4f'\
b'\x70\x50\x73\x51\x75\x53\x77\x56\x79\x2f\x49\x5b\x4e\x2b\x51'\
b'\x2d\x53\x2f\x54\x31\x55\x34\x55\x38\x54\x3c\x50\x44\x4f\x47'\
b'\x4f\x4a\x50\x4d\x52\x50\x20\x52\x51\x2d\x53\x30\x54\x34\x54'\
b'\x38\x53\x3b\x4f\x43\x4e\x47\x4e\x4a\x4f\x4d\x52\x50\x56\x52'\
b'\x52\x54\x4f\x57\x4e\x5a\x4e\x5d\x4f\x61\x53\x69\x54\x6c\x54'\
b'\x70\x53\x74\x51\x77\x20\x52\x52\x54\x50\x57\x4f\x5a\x4f\x5d'\
b'\x50\x60\x54\x68\x55\x6c\x55\x70\x54\x73\x53\x75\x51\x77\x4e'\
b'\x79\x1f\x49\x5b\x56\x2e\x53\x31\x51\x34\x4f\x38\x4e\x3d\x4e'\
b'\x43\x4f\x49\x50\x4d\x53\x58\x54\x5c\x55\x62\x55\x67\x54\x6c'\
b'\x53\x6f\x51\x73\x20\x52\x53\x31\x51\x35\x50\x38\x4f\x3d\x4f'\
b'\x42\x50\x48\x51\x4c\x54\x57\x55\x5b\x56\x61\x56\x67\x55\x6c'\
b'\x53\x70\x51\x73\x4e\x76\x1f\x49\x5b\x4e\x2e\x51\x31\x53\x34'\
b'\x55\x38\x56\x3d\x56\x43\x55\x49\x54\x4d\x51\x58\x50\x5c\x4f'\
b'\x62\x4f\x67\x50\x6c\x51\x6f\x53\x73\x20\x52\x51\x31\x53\x35'\
b'\x54\x38\x55\x3d\x55\x42\x54\x48\x53\x4c\x50\x57\x4f\x5b\x4e'\
b'\x61\x4e\x67\x4f\x6c\x51\x70\x53\x73\x56\x76\x0d\x37\x5a\x3a'\
b'\x52\x41\x52\x52\x6f\x20\x52\x40\x52\x51\x6f\x20\x52\x3f\x52'\
b'\x52\x72\x20\x52\x5a\x22\x56\x4a\x52\x72\x1a\x49\x5b\x54\x4d'\
b'\x56\x4e\x58\x50\x58\x4f\x57\x4e\x54\x4d\x51\x4d\x4e\x4e\x4d'\
b'\x4f\x4c\x51\x4c\x53\x4d\x55\x4f\x57\x53\x5a\x20\x52\x51\x4d'\
b'\x4f\x4e\x4e\x4f\x4d\x51\x4d\x53\x4e\x55\x53\x5a\x54\x5c\x54'\
b'\x5e\x53\x5f\x51\x5f\x2c\x47\x5d\x4c\x4d\x4b\x4e\x4a\x50\x4a'\
b'\x52\x4b\x55\x4f\x59\x50\x5b\x20\x52\x4a\x52\x4b\x54\x4f\x58'\
b'\x50\x5b\x50\x5d\x4f\x60\x4d\x62\x4c\x62\x4b\x61\x4a\x5f\x4a'\
b'\x5c\x4b\x58\x4d\x54\x4f\x51\x52\x4e\x54\x4d\x56\x4d\x59\x4e'\
b'\x5a\x50\x5a\x54\x59\x58\x57\x5a\x55\x5b\x54\x5b\x53\x5a\x53'\
b'\x58\x54\x57\x55\x58\x54\x59\x20\x52\x56\x4d\x58\x4e\x59\x50'\
b'\x59\x54\x58\x58\x57\x5a\x44\x45\x5f\x59\x47\x58\x48\x59\x49'\
b'\x5a\x48\x59\x47\x57\x46\x54\x46\x51\x47\x4f\x49\x4e\x4b\x4d'\
b'\x4e\x4c\x52\x4a\x5b\x49\x5f\x48\x61\x20\x52\x54\x46\x52\x47'\
b'\x50\x49\x4f\x4b\x4e\x4e\x4c\x57\x4b\x5b\x4a\x5e\x49\x60\x48'\
b'\x61\x46\x62\x44\x62\x43\x61\x43\x60\x44\x5f\x45\x60\x44\x61'\
b'\x20\x52\x5f\x47\x5e\x48\x5f\x49\x60\x48\x60\x47\x5f\x46\x5d'\
b'\x46\x5b\x47\x5a\x48\x59\x4a\x58\x4d\x55\x5b\x54\x5f\x53\x61'\
b'\x20\x52\x5d\x46\x5b\x48\x5a\x4a\x59\x4e\x57\x57\x56\x5b\x55'\
b'\x5e\x54\x60\x53\x61\x51\x62\x4f\x62\x4e\x61\x4e\x60\x4f\x5f'\
b'\x50\x60\x4f\x61\x20\x52\x49\x4d\x5e\x4d\x33\x46\x5e\x5b\x47'\
b'\x5a\x48\x5b\x49\x5c\x48\x5b\x47\x58\x46\x55\x46\x52\x47\x50'\
b'\x49\x4f\x4b\x4e\x4e\x4d\x52\x4b\x5b\x4a\x5f\x49\x61\x20\x52'\
b'\x55\x46\x53\x47\x51\x49\x50\x4b\x4f\x4e\x4d\x57\x4c\x5b\x4b'\
b'\x5e\x4a\x60\x49\x61\x47\x62\x45\x62\x44\x61\x44\x60\x45\x5f'\
b'\x46\x60\x45\x61\x20\x52\x59\x4d\x57\x54\x56\x58\x56\x5a\x57'\
b'\x5b\x5a\x5b\x5c\x59\x5d\x57\x20\x52\x5a\x4d\x58\x54\x57\x58'\
b'\x57\x5a\x58\x5b\x20\x52\x4a\x4d\x5a\x4d\x35\x46\x5e\x59\x47'\
b'\x58\x48\x59\x49\x5a\x48\x5a\x47\x58\x46\x20\x52\x5c\x46\x55'\
b'\x46\x52\x47\x50\x49\x4f\x4b\x4e\x4e\x4d\x52\x4b\x5b\x4a\x5f'\
b'\x49\x61\x20\x52\x55\x46\x53\x47\x51\x49\x50\x4b\x4f\x4e\x4d'\
b'\x57\x4c\x5b\x4b\x5e\x4a\x60\x49\x61\x47\x62\x45\x62\x44\x61'\
b'\x44\x60\x45\x5f\x46\x60\x45\x61\x20\x52\x5b\x46\x57\x54\x56'\
b'\x58\x56\x5a\x57\x5b\x5a\x5b\x5c\x59\x5d\x57\x20\x52\x5c\x46'\
b'\x58\x54\x57\x58\x57\x5a\x58\x5b\x20\x52\x4a\x4d\x59\x4d\x55'\
b'\x40\x63\x54\x47\x53\x48\x54\x49\x55\x48\x54\x47\x52\x46\x4f'\
b'\x46\x4c\x47\x4a\x49\x49\x4b\x48\x4e\x47\x52\x45\x5b\x44\x5f'\
b'\x43\x61\x20\x52\x4f\x46\x4d\x47\x4b\x49\x4a\x4b\x49\x4e\x47'\
b'\x57\x46\x5b\x45\x5e\x44\x60\x43\x61\x41\x62\x3f\x62\x3e\x61'\
b'\x3e\x60\x3f\x5f\x40\x60\x3f\x61\x20\x52\x60\x47\x5f\x48\x60'\
b'\x49\x61\x48\x60\x47\x5d\x46\x5a\x46\x57\x47\x55\x49\x54\x4b'\
b'\x53\x4e\x52\x52\x50\x5b\x4f\x5f\x4e\x61\x20\x52\x5a\x46\x58'\
b'\x47\x56\x49\x55\x4b\x54\x4e\x52\x57\x51\x5b\x50\x5e\x4f\x60'\
b'\x4e\x61\x4c\x62\x4a\x62\x49\x61\x49\x60\x4a\x5f\x4b\x60\x4a'\
b'\x61\x20\x52\x5e\x4d\x5c\x54\x5b\x58\x5b\x5a\x5c\x5b\x5f\x5b'\
b'\x61\x59\x62\x57\x20\x52\x5f\x4d\x5d\x54\x5c\x58\x5c\x5a\x5d'\
b'\x5b\x20\x52\x44\x4d\x5f\x4d\x57\x40\x63\x54\x47\x53\x48\x54'\
b'\x49\x55\x48\x54\x47\x52\x46\x4f\x46\x4c\x47\x4a\x49\x49\x4b'\
b'\x48\x4e\x47\x52\x45\x5b\x44\x5f\x43\x61\x20\x52\x4f\x46\x4d'\
b'\x47\x4b\x49\x4a\x4b\x49\x4e\x47\x57\x46\x5b\x45\x5e\x44\x60'\
b'\x43\x61\x41\x62\x3f\x62\x3e\x61\x3e\x60\x3f\x5f\x40\x60\x3f'\
b'\x61\x20\x52\x5e\x47\x5d\x48\x5e\x49\x5f\x48\x5f\x47\x5d\x46'\
b'\x20\x52\x61\x46\x5a\x46\x57\x47\x55\x49\x54\x4b\x53\x4e\x52'\
b'\x52\x50\x5b\x4f\x5f\x4e\x61\x20\x52\x5a\x46\x58\x47\x56\x49'\
b'\x55\x4b\x54\x4e\x52\x57\x51\x5b\x50\x5e\x4f\x60\x4e\x61\x4c'\
b'\x62\x4a\x62\x49\x61\x49\x60\x4a\x5f\x4b\x60\x4a\x61\x20\x52'\
b'\x60\x46\x5c\x54\x5b\x58\x5b\x5a\x5c\x5b\x5f\x5b\x61\x59\x62'\
b'\x57\x20\x52\x61\x46\x5d\x54\x5c\x58\x5c\x5a\x5d\x5b\x20\x52'\
b'\x44\x4d\x5e\x4d\x13\x4c\x59\x4d\x51\x4e\x4f\x50\x4d\x53\x4d'\
b'\x54\x4e\x54\x51\x52\x57\x52\x5a\x53\x5b\x20\x52\x52\x4d\x53'\
b'\x4e\x53\x51\x51\x57\x51\x5a\x52\x5b\x55\x5b\x57\x59\x58\x57'\
b'\x15\x4c\x58\x52\x4c\x4e\x57\x58\x50\x4c\x50\x56\x57\x52\x4c'\
b'\x20\x52\x52\x52\x52\x4c\x20\x52\x52\x52\x4c\x50\x20\x52\x52'\
b'\x52\x4e\x57\x20\x52\x52\x52\x56\x57\x20\x52\x52\x52\x58\x50'\
b'\x17\x46\x5e\x49\x55\x49\x53\x4a\x50\x4c\x4f\x4e\x4f\x50\x50'\
b'\x54\x53\x56\x54\x58\x54\x5a\x53\x5b\x51\x20\x52\x49\x53\x4a'\
b'\x51\x4c\x50\x4e\x50\x50\x51\x54\x54\x56\x55\x58\x55\x5a\x54'\
b'\x5b\x51\x5b\x4f'
_index =\
b'\x00\x00\x03\x00\x0a\x00\x11\x00\x18\x00\x1f\x00\x26\x00\x2d'\
b'\x00\x34\x00\x3b\x00\x42\x00\x49\x00\x50\x00\x57\x00\x5e\x00'\
b'\x65\x00\x76\x00\x87\x00\x98\x00\xa9\x00\xbc\x00\xcf\x00\xe2'\
b'\x00\xf5\x00\x00\x01\x0b\x01\x16\x01\x21\x01\x50\x01\x7f\x01'\
b'\xae\x01\xdd\x01\x08\x02\x2f\x02\x64\x02\x7b\x02\x8e\x02\x99'\
b'\x02\xa6\x02\xb9\x02\xcc\x02\xf1\x02\xfe\x02\x09\x03\x16\x03'\
b'\x2f\x03\x3c\x03\x49\x03\x5c\x03\xa3\x03\xda\x03\xfd\x03\x20'\
b'\x04\x43\x04\x66\x04\x7d\x04\xa8\x04\xc3\x04\xda\x04\xf7\x04'\
b'\x1c\x05\x23\x05\x3a\x05\x57\x05\x98\x05\xad\x05\xf2\x05\x73'\
b'\x06\x30\x07\x81\x07\xc2\x07\xe1\x07\x1a\x08\x89\x08\xe6\x08'\
b'\x23\x09\x94\x09\xb9\x09\xfc\x09\x31\x0a\x5a\x0a\xbb\x0a\x1c'\
b'\x0b\x7d\x0b\xbe\x0b\xff\x0b\x1c\x0c\x53\x0c\xae\x0c\x39\x0d'\
b'\xa2\x0d\x0f\x0e\xbc\x0e\x6d\x0f\x96\x0f\xc3\x0f'
# Zero-copy read-only views over the packed font tables; consumers can
# slice these without copying the underlying bytes objects.
INDEX = memoryview(_index)
FONT = memoryview(_font)
| 62.597315
| 64
| 0.707462
| 4,578
| 18,654
| 2.881826
| 0.03495
| 0.08459
| 0.04366
| 0.013644
| 0.238308
| 0.174259
| 0.14394
| 0.102554
| 0.0689
| 0.05321
| 0
| 0.38805
| 0.016672
| 18,654
| 297
| 65
| 62.808081
| 0.331189
| 0
| 0
| 0
| 0
| 0.969388
| 0.916908
| 0.91605
| 0
| 1
| 0.000429
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
49e790b5c991b0fbfc57fdccf92057294e4f2557
| 9,324
|
py
|
Python
|
tests/unit/test_dataset.py
|
victorbadenas/frarch
|
e75e2a63aaf14cf797ffffc901ca382b3d88b7b0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_dataset.py
|
victorbadenas/frarch
|
e75e2a63aaf14cf797ffffc901ca382b3d88b7b0
|
[
"Apache-2.0"
] | 4
|
2022-02-16T20:53:24.000Z
|
2022-02-16T21:39:26.000Z
|
tests/unit/test_dataset.py
|
victorbadenas/frarch
|
e75e2a63aaf14cf797ffffc901ca382b3d88b7b0
|
[
"Apache-2.0"
] | 1
|
2022-03-20T23:47:16.000Z
|
2022-03-20T23:47:16.000Z
|
import shutil
import unittest
from pathlib import Path
import torch
from frarch import datasets
from frarch.utils.exceptions import DatasetNotFoundError
DATA_FOLDER = Path(__file__).resolve().parent.parent / "data"
class TestCaltech101(unittest.TestCase):
    """Tests for ``datasets.Caltech101`` against a mocked on-disk dataset.

    ``setUpClass`` builds a fake Caltech-101 tree (one directory per class,
    10 empty ``.jpg`` files each) so no real download is needed.
    """

    MOCK_DATASET_ROOT = DATA_FOLDER / "caltech101"
    trainlst_path = MOCK_DATASET_ROOT / "train.lst"
    validlst_path = MOCK_DATASET_ROOT / "valid.lst"
    classjson_path = MOCK_DATASET_ROOT / "classes.json"
    # Class names are stored comma-separated in a fixture file.
    classes = (DATA_FOLDER / "caltech101_classes.txt").read_text().split(",")

    @classmethod
    def setUpClass(cls):
        """Create the mock dataset directory tree once for the whole suite."""
        if cls.MOCK_DATASET_ROOT.exists():
            shutil.rmtree(cls.MOCK_DATASET_ROOT)
        for c in cls.classes:
            (cls.MOCK_DATASET_ROOT / c).mkdir(parents=True, exist_ok=True)
            for i in range(10):
                (cls.MOCK_DATASET_ROOT / c / f"{i}.jpg").touch()

    @classmethod
    def tearDownClass(cls):
        """Remove the mock dataset directory tree."""
        if cls.MOCK_DATASET_ROOT.exists():
            shutil.rmtree(cls.MOCK_DATASET_ROOT)

    def tearDown(self):
        """Delete the list/mapping files the dataset writes on construction."""
        if self.trainlst_path.exists():
            self.trainlst_path.unlink()
        if self.validlst_path.exists():
            self.validlst_path.unlink()
        if self.classjson_path.exists():
            self.classjson_path.unlink()
        return super().tearDown()

    def test_build_caltech101_train(self):
        """Building the train split writes list files and indexes 909 images."""
        dataset = datasets.Caltech101("train", root=self.MOCK_DATASET_ROOT)
        self.assertIsInstance(dataset, torch.utils.data.Dataset)
        self.assertIsInstance(dataset.classes, dict)
        # Use assertEqual: assertEquals is a deprecated alias, removed in
        # Python 3.12.
        self.assertEqual(len(dataset.classes), 101)
        self.assertEqual(len(dataset.images), 909)
        self.assertEqual(dataset.train_lst_path, self.trainlst_path)
        self.assertEqual(dataset.valid_lst_path, self.validlst_path)
        self.assertEqual(dataset.mapper_path, self.classjson_path)
        self.assertTrue(self.trainlst_path.exists())
        self.assertTrue(self.validlst_path.exists())
        self.assertTrue(self.classjson_path.exists())

    def test_build_caltech101_valid(self):
        """Building the valid split writes list files and indexes 101 images."""
        dataset = datasets.Caltech101("valid", root=self.MOCK_DATASET_ROOT)
        self.assertIsInstance(dataset, torch.utils.data.Dataset)
        self.assertIsInstance(dataset.classes, dict)
        self.assertEqual(len(dataset.classes), 101)
        self.assertEqual(len(dataset.images), 101)
        self.assertEqual(dataset.train_lst_path, self.trainlst_path)
        self.assertEqual(dataset.valid_lst_path, self.validlst_path)
        self.assertEqual(dataset.mapper_path, self.classjson_path)
        self.assertTrue(self.trainlst_path.exists())
        self.assertTrue(self.validlst_path.exists())
        self.assertTrue(self.classjson_path.exists())

    def test_caltech101_not_valid_subset(self):
        """An unknown subset name raises ValueError."""
        with self.assertRaises(ValueError):
            datasets.Caltech101("nope", root=self.MOCK_DATASET_ROOT)

    def test_caltech101_path_no_files(self):
        """A root with no dataset files raises DatasetNotFoundError."""
        with self.assertRaises(DatasetNotFoundError):
            datasets.Caltech101("train", root="./nope/")

    def test_caltech101_get_length(self):
        """len() of the valid split matches the number of valid images."""
        dataset = datasets.Caltech101("valid", root=self.MOCK_DATASET_ROOT)
        self.assertEqual(len(dataset), 101)

    def test_caltech101_get_num_classes(self):
        """get_number_classes() reports the 101 mocked classes."""
        dataset = datasets.Caltech101("valid", root=self.MOCK_DATASET_ROOT)
        self.assertEqual(dataset.get_number_classes(), 101)
class TestMit67(unittest.TestCase):
    """Tests for ``datasets.Mit67`` against a mocked on-disk dataset.

    ``setUpClass`` builds a fake MIT-67 tree under ``Images/`` (one directory
    per class, 10 empty ``.jpg`` files each) so no real download is needed.
    """

    MOCK_DATASET_ROOT = DATA_FOLDER / "mit67"
    trainlst_path = MOCK_DATASET_ROOT / "train.lst"
    validlst_path = MOCK_DATASET_ROOT / "valid.lst"
    classjson_path = MOCK_DATASET_ROOT / "class_map.json"
    # Class names are stored comma-separated in a fixture file.
    classes = (DATA_FOLDER / "mit67_classes.txt").read_text().split(",")

    @classmethod
    def setUpClass(cls):
        """Create the mock dataset directory tree once for the whole suite."""
        if cls.MOCK_DATASET_ROOT.exists():
            shutil.rmtree(cls.MOCK_DATASET_ROOT)
        for c in cls.classes:
            (cls.MOCK_DATASET_ROOT / "Images" / c).mkdir(parents=True, exist_ok=True)
            for i in range(10):
                (cls.MOCK_DATASET_ROOT / "Images" / c / f"{i}.jpg").touch()

    @classmethod
    def tearDownClass(cls):
        """Remove the mock dataset directory tree."""
        if cls.MOCK_DATASET_ROOT.exists():
            shutil.rmtree(cls.MOCK_DATASET_ROOT)

    def tearDown(self):
        """Delete the list/mapping files the dataset writes on construction."""
        if self.trainlst_path.exists():
            self.trainlst_path.unlink()
        if self.validlst_path.exists():
            self.validlst_path.unlink()
        if self.classjson_path.exists():
            self.classjson_path.unlink()
        return super().tearDown()

    def test_build_mit67_train(self):
        """Building the train split writes list files and indexes 603 images."""
        dataset = datasets.Mit67(True, root=self.MOCK_DATASET_ROOT)
        self.assertIsInstance(dataset, torch.utils.data.Dataset)
        self.assertIsInstance(dataset.classes, dict)
        # Use assertEqual: assertEquals is a deprecated alias, removed in
        # Python 3.12.
        self.assertEqual(len(dataset.classes), 67)
        self.assertEqual(len(dataset.images), 603)
        self.assertEqual(dataset.train_lst_path, self.trainlst_path)
        self.assertEqual(dataset.valid_lst_path, self.validlst_path)
        self.assertEqual(dataset.mapper_path, self.classjson_path)
        self.assertTrue(self.trainlst_path.exists())
        self.assertTrue(self.validlst_path.exists())
        self.assertTrue(self.classjson_path.exists())

    def test_build_mit67_valid(self):
        """Building the valid split writes list files and indexes 67 images."""
        dataset = datasets.Mit67(False, root=self.MOCK_DATASET_ROOT)
        self.assertIsInstance(dataset, torch.utils.data.Dataset)
        self.assertIsInstance(dataset.classes, dict)
        self.assertEqual(len(dataset.classes), 67)
        self.assertEqual(len(dataset.images), 67)
        self.assertEqual(dataset.train_lst_path, self.trainlst_path)
        self.assertEqual(dataset.valid_lst_path, self.validlst_path)
        self.assertEqual(dataset.mapper_path, self.classjson_path)
        self.assertTrue(self.trainlst_path.exists())
        self.assertTrue(self.validlst_path.exists())
        self.assertTrue(self.classjson_path.exists())

    def test_mit67_path_no_files(self):
        """A root with no dataset files raises DatasetNotFoundError."""
        with self.assertRaises(DatasetNotFoundError):
            datasets.Mit67(True, root="./nope/", download=False)

    # Renamed from test_caltech101_get_length: the old name was a copy-paste
    # leftover from the Caltech101 suite and misreported failures.
    def test_mit67_get_length(self):
        """len() of the valid split matches the number of valid images."""
        dataset = datasets.Mit67(False, root=self.MOCK_DATASET_ROOT)
        self.assertEqual(len(dataset), 67)

    # Renamed from test_caltech101_get_num_classes (same copy-paste leftover).
    def test_mit67_get_num_classes(self):
        """get_number_classes() reports the 67 mocked classes."""
        dataset = datasets.Mit67(False, root=self.MOCK_DATASET_ROOT)
        self.assertEqual(dataset.get_number_classes(), 67)
class TestOxfordPets(unittest.TestCase):
    """Tests for ``datasets.OxfordPets`` against a mocked on-disk dataset.

    ``setUpClass`` copies the fixture annotation lists and creates one empty
    ``.jpg`` per image referenced by ``trainval.txt``.
    """

    MOCK_DATASET_ROOT = DATA_FOLDER / "oxfordpets"
    trainlst_path = MOCK_DATASET_ROOT / "annotations" / "trainval.txt"
    validlst_path = MOCK_DATASET_ROOT / "annotations" / "test.txt"

    @classmethod
    def setUpClass(cls):
        """Create the mock dataset tree from the fixture annotation files."""
        if cls.MOCK_DATASET_ROOT.exists():
            shutil.rmtree(cls.MOCK_DATASET_ROOT)
        cls.MOCK_DATASET_ROOT.mkdir(exist_ok=True, parents=True)
        (cls.MOCK_DATASET_ROOT / "images").mkdir(exist_ok=True, parents=True)
        shutil.copytree(
            str(DATA_FOLDER / "oxford_pets_lst"), str(cls.trainlst_path.parent)
        )
        # Each annotation line starts with the image basename; touch a
        # matching empty file so the dataset's existence checks pass.
        with open(DATA_FOLDER / "oxford_pets_lst" / "trainval.txt") as f:
            for line in f:
                fname = line.split(" ")[0]
                (cls.MOCK_DATASET_ROOT / "images" / f"{fname}.jpg").touch()

    @classmethod
    def tearDownClass(cls):
        """Remove the mock dataset directory tree."""
        if cls.MOCK_DATASET_ROOT.exists():
            shutil.rmtree(cls.MOCK_DATASET_ROOT)

    def test_build_OxfordPets_train(self):
        """Building the train split indexes 3680 images over 37 classes."""
        dataset = datasets.OxfordPets("train", root=self.MOCK_DATASET_ROOT)
        self.assertIsInstance(dataset, torch.utils.data.Dataset)
        self.assertIsInstance(dataset.classes, set)
        # Use assertEqual: assertEquals is a deprecated alias, removed in
        # Python 3.12.
        self.assertEqual(len(dataset.classes), 37)
        self.assertEqual(len(dataset.images), 3680)
        self.assertEqual(dataset.train_lst_path, self.trainlst_path)
        self.assertEqual(dataset.valid_lst_path, self.validlst_path)
        self.assertTrue(self.trainlst_path.exists())
        self.assertTrue(self.validlst_path.exists())

    def test_build_OxfordPets_valid(self):
        """Building the valid split indexes 3669 images over 37 classes."""
        dataset = datasets.OxfordPets("valid", root=self.MOCK_DATASET_ROOT)
        self.assertIsInstance(dataset, torch.utils.data.Dataset)
        self.assertIsInstance(dataset.classes, set)
        self.assertEqual(len(dataset.classes), 37)
        self.assertEqual(len(dataset.images), 3669)
        self.assertEqual(dataset.train_lst_path, self.trainlst_path)
        self.assertEqual(dataset.valid_lst_path, self.validlst_path)
        self.assertTrue(self.trainlst_path.exists())
        self.assertTrue(self.validlst_path.exists())

    def test_OxfordPets_path_no_files(self):
        """A root with no dataset files raises DatasetNotFoundError."""
        with self.assertRaises(DatasetNotFoundError):
            datasets.OxfordPets("valid", root="./nope/", download=False)

    def test_OxfordPets_not_valid_subset(self):
        """An unknown subset name raises ValueError."""
        with self.assertRaises(ValueError):
            datasets.OxfordPets("nope", root=self.MOCK_DATASET_ROOT, download=False)

    def test_OxfordPets_get_length(self):
        """len() of the valid split matches the number of valid images."""
        dataset = datasets.OxfordPets("valid", root=self.MOCK_DATASET_ROOT)
        self.assertEqual(len(dataset), 3669)

    def test_OxfordPets_get_num_classes(self):
        """get_number_classes() reports the 37 pet classes."""
        dataset = datasets.OxfordPets("valid", root=self.MOCK_DATASET_ROOT)
        self.assertEqual(dataset.get_number_classes(), 37)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 42
| 85
| 0.695839
| 1,117
| 9,324
| 5.569382
| 0.101164
| 0.077801
| 0.106092
| 0.054975
| 0.871403
| 0.833628
| 0.804372
| 0.784601
| 0.776242
| 0.716605
| 0
| 0.016895
| 0.193801
| 9,324
| 221
| 86
| 42.190045
| 0.810696
| 0
| 0
| 0.63388
| 0
| 0
| 0.037859
| 0.00236
| 0
| 0
| 0
| 0
| 0.36612
| 1
| 0.136612
| false
| 0
| 0.032787
| 0
| 0.26776
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b721245ec6635464eec11f01500c633ddbfbbed4
| 21
|
py
|
Python
|
mva/__init__.py
|
garovent/mva
|
47ec8690003bbabbbdb59eb6ed8f7e02b0019fe5
|
[
"Apache-2.0"
] | 1
|
2022-02-02T15:30:19.000Z
|
2022-02-02T15:30:19.000Z
|
mva/__init__.py
|
garovent/mva
|
47ec8690003bbabbbdb59eb6ed8f7e02b0019fe5
|
[
"Apache-2.0"
] | null | null | null |
mva/__init__.py
|
garovent/mva
|
47ec8690003bbabbbdb59eb6ed8f7e02b0019fe5
|
[
"Apache-2.0"
] | null | null | null |
from .mva import mva
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b741cda8ba7c53c12f6d0359562d0130f04c95e0
| 24
|
py
|
Python
|
learning/__init__.py
|
BarryZM/KnowYouAI
|
8c9d96238090fa8fd70b8581ac536bb1b0691eb5
|
[
"MIT"
] | null | null | null |
learning/__init__.py
|
BarryZM/KnowYouAI
|
8c9d96238090fa8fd70b8581ac536bb1b0691eb5
|
[
"MIT"
] | null | null | null |
learning/__init__.py
|
BarryZM/KnowYouAI
|
8c9d96238090fa8fd70b8581ac536bb1b0691eb5
|
[
"MIT"
] | 1
|
2020-12-31T11:13:30.000Z
|
2020-12-31T11:13:30.000Z
|
from .learn import Learn
| 24
| 24
| 0.833333
| 4
| 24
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f96e12f2cc3c9d04813e8a782b2e456aead8bec
| 26,280
|
py
|
Python
|
pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/xconnect.py
|
kecorbin/genielibs
|
5d3951b8911013691822e73e9c3d0f557ca10f43
|
[
"Apache-2.0"
] | null | null | null |
pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/xconnect.py
|
kecorbin/genielibs
|
5d3951b8911013691822e73e9c3d0f557ca10f43
|
[
"Apache-2.0"
] | null | null | null |
pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/xconnect.py
|
kecorbin/genielibs
|
5d3951b8911013691822e73e9c3d0f557ca10f43
|
[
"Apache-2.0"
] | null | null | null |
# Xconnect
# DeviceAttributes (device_attr)
# AutodiscoveryBgpAttributes (autodiscovery_bgp)
# parent = xconnect.autodiscovery_bgp
# SignalingProtocolBgpAttributes (signaling_protocol_bgp)
# parent = xconnect.autodiscovery_bgp.signaling_protocol_bgp
# CeAttributes (ce_attr)
# InterfaceAttributes (interface_attr)
#
# DeviceAutodiscoveryBgpAttributesDefaults (autodiscovery_bgp) (no config)
# DeviceSignalingProtocolBgpAttributesDefaults (signaling_protocol_bgp) (no config)
from abc import ABC
import warnings
import contextlib
from genie.conf.base.attributes import UnsupportedAttributeWarning,\
AttributesHelper
from genie.conf.base.cli import CliConfigBuilder
from genie.conf.base.config import CliConfig
from genie.libs.conf.l2vpn.pseudowire import PseudowireNeighbor,\
PseudowireIPv4Neighbor, PseudowireIPv6Neighbor, PseudowireEviNeighbor
from ..xconnect import Xconnect as _Xconnect
class Xconnect(ABC):
    """IOS-XR CLI generation for an L2VPN xconnect group.

    Attribute hierarchy (see module header):
      DeviceAttributes (device_attr)
        NeighborAttributes                  - per pseudowire neighbor
        AutodiscoveryBgpAttributes          - 'autodiscovery bgp' submode
          SignalingProtocolBgpAttributes    - 'signaling-protocol bgp' submode
            CeAttributes                    - 'ce-id <n>' submode
              InterfaceAttributes           - 'interface ... remote-ce-id ...'
    Every nested build_config returns its CLI fragment as a string; only
    DeviceAttributes.build_config may actually apply it to the device.
    """

    class DeviceAttributes(ABC):

        class NeighborAttributes(ABC):

            def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                """Render the CLI for one pseudowire neighbor; returns str."""
                assert not apply
                assert not kwargs, kwargs
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)

                # Choose the "neighbor ..." context line.  IPv4/IPv6 neighbors
                # open a config submode; an EVPN EVI neighbor is a one-liner.
                nbr_ctx = None
                nbr_is_submode = True
                if isinstance(self.neighbor, PseudowireIPv4Neighbor):
                    # iosxr: l2vpn / xconnect group <g> / p2p <n> / neighbor [ipv4] 1.2.3.4 pw-id 1 (config-l2vpn)
                    assert self.ip is not None
                    assert self.pw_id is not None
                    nbr_ctx = attributes.format('neighbor ipv4 {ip} pw-id {pw_id}', force=True)
                elif isinstance(self.neighbor, PseudowireIPv6Neighbor):
                    # iosxr: l2vpn / xconnect group <g> / p2p <n> / neighbor ipv6 1:2::3 pw-id 1 (config-l2vpn)
                    assert self.ip is not None
                    assert self.pw_id is not None
                    nbr_ctx = attributes.format('neighbor ipv6 {ip} pw-id {pw_id}', force=True)
                elif isinstance(self.neighbor, PseudowireEviNeighbor):
                    # iosxr: l2vpn / xconnect group <g> / p2p <n> / neighbor evpn evi 1 target 1 source 1
                    assert self.evi is not None
                    assert self.ac_id is not None
                    assert self.source_ac_id is not None
                    nbr_ctx = attributes.format(
                        'neighbor evpn evi {evi.evi_id} target {ac_id} source {source_ac_id}',
                        force=True)
                    nbr_is_submode = False
                else:
                    raise ValueError(self.neighbor)
                assert nbr_ctx

                if not nbr_is_submode:
                    configurations.append_line(nbr_ctx)
                else:
                    with configurations.submode_context(nbr_ctx):
                        # IOS-XR also accepts "backup neighbor ...", "bandwidth",
                        # "l2tp static ..." and "tag-impose vlan" under this
                        # submode; those are not modeled below.

                        # iosxr: ... / neighbor ... / mpls static label local 16 remote 16
                        remote_label = attributes.value('mpls_static_label')
                        if remote_label is not None:
                            # The local side of the static label pair is taken
                            # from the mirror neighbor entry on this device.
                            local_label = self.parent.neighbor_attr[self.remote_neighbor].mpls_static_label
                            if local_label is None:
                                warnings.warn(
                                    'neighbor {!r} mpls_static_label missing'.format(self.remote_neighbor),
                                    UnsupportedAttributeWarning)
                            else:
                                configurations.append_line(
                                    'mpls static label local {} remote {}'.format(
                                        local_label, remote_label))

                        # iosxr: ... / neighbor ... / pw-class someword3
                        v = attributes.value('pw_class')
                        if v is not None:
                            configurations.append_line(
                                'pw-class {}'.format(v.device_attr[self.device].name))
                        # iosxr: ... / neighbor ipv6 1:2::3 pw-id 1 / source 1:2::3
                        # NOTE(review): this elif chains off the pw_class check
                        # above, so "ipv6 source" is only emitted when pw_class
                        # is unset; preserved as found -- confirm intent.
                        elif isinstance(self.neighbor, PseudowireIPv6Neighbor):
                            configurations.append_line(attributes.format('ipv6 source {ipv6_source}'))

                return str(configurations)

            def build_unconfig(self, apply=True, attributes=None, **kwargs):
                """Render the removal CLI (build_config with unconfig=True)."""
                return self.build_config(apply=apply, attributes=attributes,
                                         unconfig=True, **kwargs)

        class AutodiscoveryBgpAttributes(ABC):

            class SignalingProtocolBgpAttributes(ABC):

                class CeAttributes(ABC):

                    class InterfaceAttributes(ABC):

                        def build_config(self, apply=True, attributes=None, unconfig=False,
                                         **kwargs):
                            """Render 'interface <if> remote-ce-id <id>'; returns str."""
                            assert not apply
                            assert not kwargs, kwargs
                            attributes = AttributesHelper(self, attributes)
                            configurations = CliConfigBuilder(unconfig=unconfig)
                            # iosxr: ... / ce-id 1 / interface Bundle-Ether1 remote-ce-id 1
                            configurations.append_line(attributes.format(
                                'interface {interface_name} remote-ce-id {remote_ce_id}',
                                force=True))
                            return str(configurations)

                        def build_unconfig(self, apply=True, attributes=None, **kwargs):
                            """Render the removal CLI (build_config with unconfig=True)."""
                            return self.build_config(apply=apply, attributes=attributes,
                                                     unconfig=True, **kwargs)

                    def build_config(self, apply=True, attributes=None, unconfig=False,
                                     **kwargs):
                        """Render the 'ce-id <n>' submode and its interfaces; returns str."""
                        assert not apply
                        assert not kwargs, kwargs
                        attributes = AttributesHelper(self, attributes)
                        configurations = CliConfigBuilder(unconfig=unconfig)
                        # iosxr: ... / signaling-protocol bgp / ce-id 1 (config-l2vpn)
                        with configurations.submode_context(attributes.format('ce-id {ce_id}', force=True)):
                            if unconfig and attributes.iswildcard:
                                configurations.submode_unconfig()
                            # iosxr: ... / ce-id 1 / interface Bundle-Ether1 remote-ce-id 1
                            for ns, attributes2 in attributes.mapping_values(
                                    'interface_attr', keys=self.interfaces, sort=True):
                                configurations.append_block(ns.build_config(
                                    apply=False, unconfig=unconfig, attributes=attributes2))
                        return str(configurations)

                    def build_unconfig(self, apply=True, attributes=None, **kwargs):
                        """Render the removal CLI (build_config with unconfig=True)."""
                        return self.build_config(apply=apply, attributes=attributes,
                                                 unconfig=True, **kwargs)

                def build_config(self, apply=True, attributes=None, unconfig=False,
                                 **kwargs):
                    """Render the 'signaling-protocol bgp' submode; returns str."""
                    assert not apply
                    assert not kwargs, kwargs
                    attributes = AttributesHelper(self, attributes)
                    configurations = CliConfigBuilder(unconfig=unconfig)
                    # iosxr: ... / autodiscovery bgp / signaling-protocol bgp (config-l2vpn)
                    with configurations.submode_context('signaling-protocol bgp'):
                        # Emit nothing unless this namespace is explicitly enabled.
                        if not attributes.value('enabled', force=True):
                            configurations.submode_cancel()
                        # iosxr: ... / signaling-protocol bgp / ce-range 11
                        configurations.append_line(attributes.format('ce-range {ce_range}'))
                        # iosxr: ... / signaling-protocol bgp / ce-id 1 (config-l2vpn)
                        for ns, attributes2 in attributes.mapping_values(
                                'ce_attr', keys=self.ce_ids, sort=True):
                            configurations.append_block(ns.build_config(
                                apply=False, unconfig=unconfig, attributes=attributes2))
                        # "load-balancing flow-label {both|receive|transmit}
                        # [static]" is accepted by IOS-XR here but not modeled.
                    return str(configurations)

                def build_unconfig(self, apply=True, attributes=None, **kwargs):
                    """Render the removal CLI (build_config with unconfig=True)."""
                    return self.build_config(apply=apply, attributes=attributes,
                                             unconfig=True, **kwargs)

            def build_config(self, apply=True, attributes=None, unconfig=False,
                             **kwargs):
                """Render the 'autodiscovery bgp' submode (rd, RTs, bgp); returns str."""
                assert not apply
                assert not kwargs, kwargs
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)
                # iosxr: ... / mp2mp <n> / autodiscovery bgp (config-l2vpn)
                with configurations.submode_context('autodiscovery bgp'):
                    # Emit nothing unless this namespace is explicitly enabled.
                    if not attributes.value('enabled', force=True):
                        configurations.submode_cancel()
                    # iosxr: ... / autodiscovery bgp / rd {100:200|1.2.3.4:1|auto}
                    configurations.append_line(attributes.format('rd {rd}'))
                    # Route targets present in both the export and the import
                    # set collapse to a single 'route-target <rt>' line.
                    both_route_targets = set(self.export_route_targets) & set(self.import_route_targets)
                    for v, attributes2 in attributes.sequence_values('export_route_targets', sort=True):
                        if v in both_route_targets:
                            cfg = 'route-target {}'.format(v.route_target)
                        else:
                            cfg = 'route-target export {}'.format(v.route_target)
                        if v.stitching:
                            # BUGFIX: was `warning.warn(UnsupportedAttributeWarning,
                            # '...')` -- undefined name `warning` (NameError) and
                            # the message/category arguments were reversed.
                            warnings.warn('route-target export/import stitching',
                                          UnsupportedAttributeWarning)
                        configurations.append_line(cfg)
                    # iosxr: ... / autodiscovery bgp / route-target import <rt>
                    for v, attributes2 in attributes.sequence_values('import_route_targets', sort=True):
                        if v not in both_route_targets:
                            cfg = 'route-target import {}'.format(v.route_target)
                            if v.stitching:
                                # BUGFIX: same `warning.warn` defect as above.
                                warnings.warn('route-target export/import stitching',
                                              UnsupportedAttributeWarning)
                            configurations.append_line(cfg)
                    # iosxr: ... / autodiscovery bgp / signaling-protocol bgp (config-l2vpn)
                    ns, attributes2 = attributes.namespace('signaling_protocol_bgp')
                    if ns:
                        configurations.append_block(ns.build_config(
                            apply=False, unconfig=unconfig, attributes=attributes2))
                return str(configurations)

            def build_unconfig(self, apply=True, attributes=None, **kwargs):
                """Render the removal CLI (build_config with unconfig=True)."""
                return self.build_config(apply=apply, attributes=attributes,
                                         unconfig=True, **kwargs)

        def build_config(self, apply=True, attributes=None, unconfig=False,
                         contained=False, **kwargs):
            """Build (and optionally apply) the full xconnect CLI for one device.

            Args:
                apply: when True, push the CLI to self.device; when False,
                    return a CliConfig for the caller to aggregate.
                contained: when True, skip the outer 'l2vpn' submode (the
                    caller already provides it).
            """
            assert not kwargs, kwargs
            attributes = AttributesHelper(self, attributes)
            configurations = CliConfigBuilder(unconfig=unconfig)

            # iosxr: l2vpn (config-l2vpn)
            submode_stack = contextlib.ExitStack()
            if not contained:
                submode_stack.enter_context(
                    configurations.submode_context('l2vpn'))

            # iosxr: l2vpn / xconnect group <group_name> (config-l2vpn)
            with configurations.submode_context(attributes.format('xconnect group {group_name}', force=True, cancel_empty=True)):

                if self.xconnect_type is _Xconnect.Type.mp2mp:
                    # iosxr: l2vpn / xconnect group <g> / mp2mp <name> (config-l2vpn)
                    with configurations.submode_context(attributes.format('mp2mp {name}', force=True)):
                        if unconfig and attributes.iswildcard:
                            configurations.submode_unconfig()
                        # control-word / interworking / l2-encapsulation / mtu /
                        # shutdown are accepted here but not modeled.
                        # iosxr: ... / mp2mp <name> / vpn-id 1
                        configurations.append_line(attributes.format('vpn-id {vpn_id}'))
                        # iosxr: ... / mp2mp <name> / autodiscovery bgp (config-l2vpn)
                        ns, attributes2 = attributes.namespace('autodiscovery_bgp')
                        if ns:
                            configurations.append_block(ns.build_config(
                                apply=False, unconfig=unconfig, attributes=attributes2))

                elif self.xconnect_type is _Xconnect.Type.p2p:
                    # iosxr: l2vpn / xconnect group <g> / p2p <name> (config-l2vpn)
                    with configurations.submode_context(attributes.format('p2p {name}', force=True)):
                        if unconfig and attributes.iswildcard:
                            configurations.submode_unconfig()
                        # iosxr: ... / p2p <name> / description someword3
                        configurations.append_line(attributes.format('description {description}'))
                        # iosxr: ... / p2p <name> / interface Bundle-Ether1
                        for interface, attributes2 in attributes.sequence_values('interfaces', sort=True):
                            configurations.append_line('interface {}'.format(interface.name))
                        # iosxr: ... / p2p <name> / interworking {ethernet|ipv4}
                        configurations.append_line(attributes.format('interworking {interworking}'))
                        # iosxr: ... / p2p <name> / neighbor ... (config-l2vpn)
                        for sub, attributes2 in attributes.mapping_values(
                                'neighbor_attr', keys=self.pseudowire_neighbors, sort=True):
                            configurations.append_block(
                                sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))

                else:
                    warnings.warn(
                        'xconnect type mode {}'.format(self.xconnect_type),
                        UnsupportedAttributeWarning)

            submode_stack.close()
            if apply:
                if configurations:
                    self.device.configure(str(configurations), fail_invalid=True)
            else:
                return CliConfig(device=self.device, unconfig=unconfig,
                                 cli_config=configurations, fail_invalid=True)

        def build_unconfig(self, apply=True, attributes=None, **kwargs):
            """Render/apply the removal CLI (build_config with unconfig=True)."""
            return self.build_config(apply=apply, attributes=attributes,
                                     unconfig=True, **kwargs)
| 73
| 187
| 0.596423
| 2,795
| 26,280
| 5.550268
| 0.067979
| 0.063173
| 0.112551
| 0.143815
| 0.819893
| 0.783988
| 0.773158
| 0.718752
| 0.701089
| 0.686328
| 0
| 0.048113
| 0.329338
| 26,280
| 359
| 188
| 73.203343
| 0.832057
| 0.445586
| 0
| 0.497207
| 0
| 0
| 0.056793
| 0.001522
| 0
| 0
| 0
| 0
| 0.106145
| 1
| 0.067039
| false
| 0
| 0.072626
| 0.03352
| 0.24581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b7655e9020c51877720b2ef28f7e85689f4d201e
| 70,663
|
py
|
Python
|
pyne/tests/test_source_sampling.py
|
AllSafeCyberSecur1ty/Nuclear-Engineering
|
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
|
[
"MIT"
] | 1
|
2022-03-26T20:01:13.000Z
|
2022-03-26T20:01:13.000Z
|
pyne/tests/test_source_sampling.py
|
AllSafeCyberSecur1ty/Nuclear-Engineering
|
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
|
[
"MIT"
] | null | null | null |
pyne/tests/test_source_sampling.py
|
AllSafeCyberSecur1ty/Nuclear-Engineering
|
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
|
[
"MIT"
] | 1
|
2022-03-26T19:59:13.000Z
|
2022-03-26T19:59:13.000Z
|
import os
import warnings
import itertools
from operator import itemgetter
from nose.tools import assert_equal, with_setup, assert_almost_equal, assert_raises
from random import uniform, seed
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
try:
from pyne.mesh import Mesh
# see if the source sampling module exists but do not import it
import imp
pyne_info = imp.find_module("pyne")
pyne_mod = imp.load_module("pyne", *pyne_info)
imp.find_module("source_sampling", pyne_mod.__path__)
except ImportError:
from nose.plugins.skip import SkipTest
raise SkipTest
from pyne.source_sampling import Sampler, AliasTable
from pyne.mesh import Mesh, NativeMeshTag
from pyne.r2s import tag_e_bounds
from pymoab import core as mb_core, types
from pyne.utils import QAWarning
warnings.simplefilter("ignore", QAWarning)
# Define modes
# Mode codes passed as the last argument to pyne.source_sampling.Sampler.
# The tests below show that the *_USER modes require a bias tag
# (missing "bias_tag_name" raises ValueError for them only), and that
# *_UNIFORM modes adjust particle weights (see test_uniform).
# SUBVOXEL_* presumably resolves cell sub-volumes within each voxel
# (names only -- confirm against the Sampler documentation).
DEFAULT_ANALOG = 0
DEFAULT_UNIFORM = 1
DEFAULT_USER = 2
SUBVOXEL_ANALOG = 3
SUBVOXEL_UNIFORM = 4
SUBVOXEL_USER = 5
def try_rm_file(filename):
    """Return a teardown callable that deletes *filename* if it exists.

    The returned callable always returns None, whether or not the file
    was present (suitable for nose's with_setup teardown argument).
    """
    def _cleanup():
        if os.path.exists(filename):
            os.remove(filename)
    return _cleanup
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_single_hex_tag_names_map():
    """Exercise Sampler construction against the tag_names mapping.

    Builds a small two-voxel mesh, then checks that construction succeeds
    with a complete tag_names map (with e_bounds taken from the file, or
    passed explicitly), raises ValueError when a required tag name is
    missing, and raises RuntimeError when a non-zero source density is
    biased to zero (which would yield a NaN weight).
    """
    seed(1953)
    m = Mesh(
        structured=True, structured_coords=[[0, 3, 3.5], [0, 1], [0, 1]], mats=None
    )
    m.src = NativeMeshTag(2, float)
    m.src[:] = [[2.0, 1.0], [9.0, 3.0]]
    e_bounds = np.array([0, 0.5, 1.0])
    m = tag_e_bounds(m, e_bounds)
    m.bias = NativeMeshTag(2, float)
    cell_fracs = np.zeros(
        2,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [(0, 11, 1.0, 0.0), (1, 11, 1.0, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    m.bias[:] = [[1.0, 2.0], [3.0, 3.0]]
    filename = "sampling_mesh.h5m"
    m.write_hdf5(filename)

    all_modes = (
        DEFAULT_ANALOG,
        DEFAULT_UNIFORM,
        DEFAULT_USER,
        SUBVOXEL_ANALOG,
        SUBVOXEL_UNIFORM,
        SUBVOXEL_USER,
    )
    full_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }

    def _without(key):
        # Copy of the complete map with one required entry removed.
        names = dict(full_names)
        del names[key]
        return names

    # right condition with e_bounds in 'source.h5m'
    sampler = Sampler(filename, dict(full_names), DEFAULT_ANALOG)
    # right condition with e_bounds provided by both 'e_bounds' and 'source.h5m'
    sampler = Sampler(filename, dict(full_names), e_bounds, DEFAULT_ANALOG)

    # src_tag_name not given: every mode must reject
    for mode in all_modes:
        assert_raises(ValueError, Sampler, filename, _without("src_tag_name"), e_bounds, mode)
    # bias_tag_name not given: only the user-bias modes need it
    for mode in (DEFAULT_USER, SUBVOXEL_USER):
        assert_raises(ValueError, Sampler, filename, dict(full_names), e_bounds, mode)
    # cell_number_tag_name not given: every mode must reject
    for mode in all_modes:
        assert_raises(ValueError, Sampler, filename, _without("cell_number_tag_name"), e_bounds, mode)
    # cell_fracs_tag_name not given: every mode must reject
    for mode in all_modes:
        assert_raises(ValueError, Sampler, filename, _without("cell_fracs_tag_name"), e_bounds, mode)

    # wrong bias_tag data (non-zero source_density biased to zero -> NAN weight)
    m.src = NativeMeshTag(2, float)
    m.src[:] = [[1.0, 1.0]]
    m.bias = NativeMeshTag(2, float)
    m.bias[:] = [[0.0, 0.0]]
    m.write_hdf5(filename)
    biased_names = dict(full_names)
    biased_names["bias_tag_name"] = "bias"
    assert_raises(RuntimeError, Sampler, filename, biased_names, e_bounds, SUBVOXEL_USER)
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_analog_single_hex():
    """This test tests that particles are sampled evenly within the phase-space
    of a single mesh volume element with one energy group in an analog sampling
    scheme. This is done by dividing each dimension (x, y, z, E) in half, then
    sampling particles and tallying on the basis of which of the 2^4 = 16
    regions of phase space the particle is born into.

    (Docstring fixed: the original said "2^4 = 8 regions"; 2^4 is 16, matching
    the 2x2x2x2 tally array below.)
    """
    seed(1953)
    m = Mesh(structured=True, structured_coords=[[0, 1], [0, 1], [0, 1]], mats=None)
    m.src = NativeMeshTag(1, float)
    m.src[0] = 1.0
    cell_fracs = np.zeros(
        1,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [(0, 11, 1.0, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    e_bounds = np.array([0, 1.0])
    m = tag_e_bounds(m, e_bounds)
    filename = "sampling_mesh.h5m"
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, DEFAULT_ANALOG)
    num_samples = 5000
    score = 1.0 / num_samples
    num_divs = 2
    tally = np.zeros(shape=(num_divs, num_divs, num_divs, num_divs))
    for i in range(num_samples):
        # 6 pseudo-random numbers per birth: position, energy, direction.
        s = sampler.particle_birth(np.array([uniform(0, 1) for x in range(6)]))
        assert_equal(s.w, 1.0)  # analog: all weights must be one
        tally[
            int(s.x * num_divs),
            int(s.y * num_divs),
            int(s.z * num_divs),
            int(s.e * num_divs),
        ] += score
    # Test that each half-space of phase space (e.g. x > 0.5) is sampled about
    # half the time.
    for i in range(0, 4):
        for j in range(0, 2):
            assert abs(np.sum(np.rollaxis(tally, i)[j, :, :, :]) - 0.5) < 0.05
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_analog_multiple_hex():
    """This test tests that particles are sampled uniformly from a uniform
    source defined on eight mesh volume elements in two energy groups. This is
    done using the exact same method as test_analog_single_hex.

    (Docstring fixed: the original said "method ass test_analog_multiple_hex",
    a typo and a self-reference.)
    """
    seed(1953)
    m = Mesh(
        structured=True,
        structured_coords=[[0, 0.5, 1], [0, 0.5, 1], [0, 0.5, 1]],
        mats=None,
    )
    m.src = NativeMeshTag(2, float)
    m.src[:] = np.ones(shape=(8, 2))
    cell_fracs = np.zeros(
        8,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [
        (0, 11, 1.0, 0.0),
        (1, 11, 1.0, 0.0),
        (2, 11, 1.0, 0.0),
        (3, 11, 1.0, 0.0),
        (4, 11, 1.0, 0.0),
        (5, 11, 1.0, 0.0),
        (6, 11, 1.0, 0.0),
        (7, 11, 1.0, 0.0),
    ]
    m.tag_cell_fracs(cell_fracs)
    filename = "sampling_mesh.h5m"
    e_bounds = np.array([0, 0.5, 1])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, DEFAULT_ANALOG)
    num_samples = 5000
    score = 1.0 / num_samples
    num_divs = 2
    tally = np.zeros(shape=(num_divs, num_divs, num_divs, num_divs))
    for i in range(num_samples):
        s = sampler.particle_birth([uniform(0, 1) for x in range(6)])
        assert_equal(s.w, 1.0)  # analog: all weights must be one
        tally[
            int(s.x * num_divs),
            int(s.y * num_divs),
            int(s.z * num_divs),
            int(s.e * num_divs),
        ] += score
    # Each phase-space half-space should hold about half the score.
    for i in range(0, 4):
        for j in range(0, 2):
            halfspace_sum = np.sum(np.rollaxis(tally, i)[j, :, :, :])
            assert abs(halfspace_sum - 0.5) / 0.5 < 0.1
@with_setup(None, try_rm_file("tet.h5m"))
def test_analog_single_tet():
    """This test tests uniform sampling within a single tetrahedron. This is
    done by dividing the tetrahedron in 4 smaller tetrahedrons and ensuring
    that each sub-tet is sampled equally.
    """
    seed(1953)
    mesh = mb_core.Core()
    v1 = [0.0, 0.0, 0.0]
    v2 = [1.0, 0.0, 0.0]
    v3 = [0.0, 1.0, 0.0]
    v4 = [0.0, 0.0, 1.0]
    verts = mesh.create_vertices([v1, v2, v3, v4])
    mesh.create_element(types.MBTET, verts)
    m = Mesh(structured=False, mesh=mesh)
    m.src = NativeMeshTag(1, float)
    m.src[:] = np.array([1])
    filename = "tet.h5m"
    e_bounds = np.array([0.0, 1.0])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)

    # Split the tet into four sub-tets sharing the centroid as apex.
    center = m.ve_center(list(m.iter_ve())[0])
    subtets = [
        [center, v1, v2, v3],
        [center, v1, v2, v4],
        [center, v1, v3, v4],
        [center, v2, v3, v4],
    ]
    tag_names = {"src_tag_name": "src", "e_bounds_tag_name": "e_bounds"}
    sampler = Sampler(filename, tag_names, np.array([0.0, 1.0]), DEFAULT_ANALOG)

    num_samples = 5000
    score = 1.0 / num_samples
    tally = np.zeros(shape=(4))
    for _ in range(num_samples):
        birth = sampler.particle_birth([uniform(0.0, 1.0) for x in range(6)])
        assert_equal(birth.w, 1.0)
        position = [birth.x, birth.y, birth.z]
        # Credit the sub-tet containing this birth position.
        for idx, tet in enumerate(subtets):
            if point_in_tet(tet, position):
                tally[idx] += score
                break
    # Each sub-tet should receive roughly a quarter of the samples.
    for fraction in tally:
        assert abs(fraction - 0.25) / 0.25 < 0.2
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_uniform():
    """This test tests that the uniform biasing scheme:
    1. Samples space uniformly. This is checked using the same method
    described in test_analog_single_hex().
    2. Adjusts weights accordingly. Sample calculations are provided in Case 1
    in the Theory Manual.
    """
    seed(1953)
    m = Mesh(
        structured=True,
        structured_coords=[[0, 3, 3.5], [0.0, 1.0], [0.0, 1.0]],
        mats=None,
    )
    m.src = NativeMeshTag(2, float)
    m.src[:] = [[2.0, 1.0], [9.0, 3.0]]
    e_bounds = np.array([0.0, 0.5, 1.0])
    filename = "sampling_mesh.h5m"
    cell_fracs = np.zeros(
        2,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [(0, 11, 1.0, 0.0), (1, 11, 1.0, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, DEFAULT_UNIFORM)
    num_samples = 10000
    score = 1.0 / num_samples
    num_divs = 2
    spatial_tally = np.zeros(shape=(num_divs, num_divs, num_divs))
    e_tally = np.zeros(shape=(4))  # number of phase space groups
    for i in range(num_samples):
        s = sampler.particle_birth(np.array([uniform(0, 1) for x in range(6)]))
        if s.x < 3.0:
            assert_almost_equal(s.w, 0.7)  # hand calcs
        else:
            assert_almost_equal(s.w, 2.8)  # hand calcs
        spatial_tally[
            int(s.x * num_divs / 3.5),
            int(s.y * num_divs / 1.0),
            int(s.z * num_divs / 1.0),
        ] += score
        # Tally the four (x-halfspace, energy-group) regions.  Rewritten as
        # one consistent if/elif chain (the original mixed `if` and `elif`);
        # the four conditions are mutually exclusive, so behavior is
        # unchanged.  Samples landing exactly on x == 3 or e == 0.5 are
        # (as before) not tallied anywhere.
        if s.x < 3 and s.e < 0.5:
            e_tally[0] += score
        elif s.x < 3 and s.e > 0.5:
            e_tally[1] += score
        elif s.x > 3 and s.e < 0.5:
            e_tally[2] += score
        elif s.x > 3 and s.e > 0.5:
            e_tally[3] += score
    for i in range(0, 3):
        for j in range(0, 2):
            halfspace_sum = np.sum(np.rollaxis(spatial_tally, i)[j, :, :])
            assert abs(halfspace_sum - 0.5) / 0.5 < 0.1
    expected_e_tally = [4.0 / 7, 2.0 / 7, 3.0 / 28, 1.0 / 28]  # hand calcs
    for i in range(4):
        assert abs(e_tally[i] - expected_e_tally[i]) / expected_e_tally[i] < 0.1
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_single_hex_single_subvoxel_analog():
    """This test tests that particles are sampled evenly within the phase-space
    of a single mesh volume element (also a sub-voxel) with one energy group
    in an analog sampling scheme. This is done by dividing each dimension
    (x, y, z, E) in half, then sampling particles and tallying on the basis of
    which of the 2^4 = 16 regions of phase space the particle is born into.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # Single unit-cube voxel.
    m = Mesh(
        structured=True,
        structured_coords=[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
        mats=None,
    )
    m.src = NativeMeshTag(1, float)
    m.src[0] = 1.0
    cell_fracs = np.zeros(
        1,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    # One cell (11) fills the whole voxel.
    cell_fracs[:] = [(0, 11, 1.0, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    filename = "sampling_mesh.h5m"
    e_bounds = np.array([0.0, 1.0])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_ANALOG)
    num_samples = 5000
    score = 1.0 / num_samples
    num_divs = 2
    tally = np.zeros(shape=(num_divs, num_divs, num_divs, num_divs))
    for i in range(num_samples):
        s = sampler.particle_birth(np.array([uniform(0.0, 1.0) for x in range(6)]))
        assert_equal(s.w, 1.0)  # analog: all weights must be one
        assert_equal(s.cell_list[0], 11)  # analog: the cell number
        tally[
            int(s.x * num_divs),
            int(s.y * num_divs),
            int(s.z * num_divs),
            int(s.e * num_divs),
        ] += score
    # Test that each half-space of phase space (e.g. x > 0.5) is sampled about
    # half the time.
    for i in range(0, 4):
        for j in range(0, 2):
            assert abs(np.sum(np.rollaxis(tally, i)[j, :, :, :]) - 0.5) < 0.05
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_single_hex_multiple_subvoxel_analog():
    """This test tests that particles are sampled analog within the phase-space
    of a single mesh volume element but multiple sub-voxels with one energy
    group in an analog sampling scheme. Then sampling particles and tallying
    the particles and check the probability of particles born in each
    sub-voxel and the cell_number.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    m = Mesh(
        structured=True,
        structured_coords=[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
        mats=None,
    )
    # Per-sub-voxel source strengths: cell 11 emits nothing, so it should
    # never be sampled; 12 and 13 split the source 0.2 / 0.8.
    m.src = NativeMeshTag(3, float)
    m.src[:] = np.empty(shape=(1, 3))
    m.src[0] = [0, 0.2, 0.8]
    cell_fracs = np.zeros(
        3,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [(0, 11, 0.4, 0.0), (0, 12, 0.3, 0.0), (0, 13, 0.3, 0.0)]
    m.tag_cell_fracs(cell_fracs)  # cell_fracs will be sorted
    filename = "sampling_mesh.h5m"
    e_bounds = np.array([0, 1])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_ANALOG)
    num_samples = 50000
    score = 1.0 / num_samples
    num_divs = 2
    tally = [0.0] * 3
    for i in range(num_samples):
        s = sampler.particle_birth(np.array([uniform(0, 1) for x in range(6)]))
        assert_equal(s.w, 1.0)  # analog: all weights must be one
        if s.cell_list[0] == 11:
            tally[0] += score
        elif s.cell_list[0] == 12:
            tally[1] += score
        elif s.cell_list[0] == 13:
            tally[2] += score
    # Test that each source particle in each cell has right frequency
    assert_equal(tally[0], 0.0)
    assert abs(tally[1] - 0.2) / 0.2 < 0.05
    assert abs(tally[2] - 0.8) / 0.8 < 0.05
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_multiple_hex_multiple_subvoxel_analog():
    """This test tests that particle are sampled analog from a uniform source
    defined on eight mesh volume elements in two energy groups.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # 2x2x2 voxel grid covering the unit cube.
    m = Mesh(
        structured=True,
        structured_coords=[[0, 0.5, 1], [0, 0.5, 1], [0, 0.5, 1]],
        mats=None,
    )
    # Uniform source: identical strength in every voxel and energy group.
    m.src = NativeMeshTag(2, float)
    m.src[:] = np.ones(shape=(8, 2))
    cell_fracs = np.zeros(
        8,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    # Voxel i is filled entirely by cell i+1.
    cell_fracs[:] = [
        (0, 1, 1.0, 0.0),
        (1, 2, 1.0, 0.0),
        (2, 3, 1.0, 0.0),
        (3, 4, 1.0, 0.0),
        (4, 5, 1.0, 0.0),
        (5, 6, 1.0, 0.0),
        (6, 7, 1.0, 0.0),
        (7, 8, 1.0, 0.0),
    ]
    m.tag_cell_fracs(cell_fracs)
    filename = "sampling_mesh.h5m"
    e_bounds = np.array([0, 0.5, 1])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_ANALOG)
    num_samples = 5000
    score = 1.0 / num_samples
    num_divs = 2
    tally = np.zeros(shape=(num_divs, num_divs, num_divs, num_divs))
    for i in range(num_samples):
        s = sampler.particle_birth([uniform(0, 1) for x in range(6)])
        assert_equal(s.w, 1.0)
        # The reported cell must match the octant the position falls in
        # (cells are numbered 1..8 in x-major order).
        assert_equal(
            s.cell_list[0],
            4 * int(s.x * num_divs) + 2 * int(s.y * num_divs) + int(s.z * num_divs) + 1,
        )
        tally[
            int(s.x * num_divs),
            int(s.y * num_divs),
            int(s.z * num_divs),
            int(s.e * num_divs),
        ] += score
    # Each half-space of (x, y, z, E) should hold about half the score.
    for i in range(0, 4):
        for j in range(0, 2):
            halfspace_sum = np.sum(np.rollaxis(tally, i)[j, :, :, :])
            assert abs(halfspace_sum - 0.5) / 0.5 < 0.1
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_single_hex_subvoxel_uniform():
    """This test tests that particles are sampled evenly within the phase-space
    of a single mesh volume element with one energy group in an uniform
    sampling scheme. This is done by dividing each dimension (x, y, z, E) in
    half, then sampling particles and tallying on the basis of which of the
    2^4 = 16 regions of phase space the particle is born into.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    m = Mesh(
        structured=True,
        structured_coords=[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
        mats=None,
    )
    m.src = NativeMeshTag(1, float)
    m.src[0] = 1.0
    cell_fracs = np.zeros(
        1,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    # One cell (11) fills the whole voxel.
    cell_fracs[:] = [(0, 11, 1.0, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    filename = "sampling_mesh.h5m"
    e_bounds = np.array([0.0, 1.0])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_UNIFORM)
    num_samples = 5000
    score = 1.0 / num_samples
    num_divs = 2
    tally = np.zeros(shape=(num_divs, num_divs, num_divs, num_divs))
    for i in range(num_samples):
        s = sampler.particle_birth(np.array([uniform(0.0, 1.0) for x in range(6)]))
        # For a single uniform source voxel, uniform sampling coincides with
        # analog sampling, so every weight is exactly 1.
        assert_equal(s.w, 1.0)
        assert_equal(s.cell_list[0], 11)  # the only cell present
        tally[
            int(s.x * num_divs),
            int(s.y * num_divs),
            int(s.z * num_divs),
            int(s.e * num_divs),
        ] += score
    # Test that each half-space of phase space (e.g. x > 0.5) is sampled about
    # half the time.
    for i in range(0, 4):
        for j in range(0, 2):
            assert abs(np.sum(np.rollaxis(tally, i)[j, :, :, :]) - 0.5) < 0.05
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_single_hex_multiple_subvoxel_uniform():
    """This test tests SUBVOXEL_UNIFORM sampling on a single mesh volume
    element containing three sub-voxels (cells 11, 12, 13). Cell 11 has zero
    source strength and must never be sampled; cells 12 and 13 are each
    sampled half the time, with birth weights (0.4 and 1.6, hand calculated)
    that compensate for their differing source densities.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    m = Mesh(
        structured=True,
        structured_coords=[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
        mats=None,
    )
    # Per-sub-voxel source strengths: cell 11 emits nothing.
    m.src = NativeMeshTag(3, float)
    m.src[:] = np.empty(shape=(1, 3))
    m.src[0] = [0, 0.2, 0.8]
    cell_fracs = np.zeros(
        3,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [(0, 11, 0.4, 0.0), (0, 12, 0.3, 0.0), (0, 13, 0.3, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    filename = "sampling_mesh.h5m"
    e_bounds = np.array([0.0, 1.0])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_UNIFORM)
    num_samples = 5000
    score = 1.0 / num_samples
    num_divs = 2
    tally = [0.0] * 3
    for i in range(num_samples):
        s = sampler.particle_birth(np.array([uniform(0.0, 1.0) for x in range(6)]))
        if s.cell_list[0] == 11:
            tally[0] += score
        if s.cell_list[0] == 12:
            tally[1] += score
            # uniform: weight compensates for cell 12's low source density
            assert abs(s.w - 0.4) / 0.4 < 0.05
        if s.cell_list[0] == 13:
            tally[2] += score
            # uniform: weight compensates for cell 13's high source density
            assert abs(s.w - 1.6) / 1.6 < 0.05
    # Test that each source particle in each cell has right frequency
    assert_equal(tally[0], 0.0)
    assert abs(tally[1] - 0.5) < 0.05
    assert abs(tally[2] - 0.5) < 0.05
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_multiple_hex_multiple_subvoxel_uniform():
    """This test tests that particle are sampled uniformly from a uniform source
    defined on eight mesh volume elements in two energy groups.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # 2x2x2 voxel grid covering the unit cube.
    m = Mesh(
        structured=True,
        structured_coords=[[0, 0.5, 1], [0, 0.5, 1], [0, 0.5, 1]],
        mats=None,
    )
    # Alternate voxels are empty; the non-empty ones have strengths 1..4 in
    # the first energy group only.
    m.src = NativeMeshTag(2, float)
    m.src[:] = np.empty(shape=(8, 2), dtype=float)
    m.src[:] = [[0, 0], [1, 0], [0, 0], [2, 0], [0, 0], [3, 0], [0, 0], [4, 0]]
    cell_fracs = np.zeros(
        8,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    # Voxel i is filled entirely by cell i (cells numbered from 0 here).
    cell_fracs[:] = [
        (0, 0, 1.0, 0.0),
        (1, 1, 1.0, 0.0),
        (2, 2, 1.0, 0.0),
        (3, 3, 1.0, 0.0),
        (4, 4, 1.0, 0.0),
        (5, 5, 1.0, 0.0),
        (6, 6, 1.0, 0.0),
        (7, 7, 1.0, 0.0),
    ]
    empty_cells = [0, 2, 4, 6]
    m.tag_cell_fracs(cell_fracs)
    filename = "sampling_mesh.h5m"
    e_bounds = np.array([0, 0.5, 1])
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_UNIFORM)
    num_samples = 50000
    score = 1.0 / num_samples
    num_divs = 2
    tally = [0.0] * 8
    for i in range(num_samples):
        s = sampler.particle_birth([uniform(0, 1) for x in range(6)])
        # check the cell_number
        assert_equal(
            s.cell_list[0],
            4 * int(s.x * num_divs) + 2 * int(s.y * num_divs) + int(s.z * num_divs),
        )
        # check the weight of each subvoxel
        if s.cell_list[0] not in empty_cells:
            # weight for cell 1, 3, 5, 7 should be: 0.4, 0.8, 1.2, 1.6
            exp_w = (s.cell_list[0] + 1) / 2 * 0.4
            out_w = s.w
            assert abs(out_w - exp_w) / exp_w < 0.05  # hand calculate
        # count the tally
        tally[s.cell_list[0]] += score
    # Uniform sampling: the four non-empty cells each get ~1/4 of the samples.
    for i, item in enumerate(tally):
        if i not in empty_cells:
            assert abs(item - 0.25) / 0.25 < 0.05
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_bias():
    """This test tests that a user-specified biasing scheme:
    1. Samples space uniformly according to the scheme.
    2. Adjusts weights accordingly. Sample calculations are provided in Case 2
    in the Theory Manual.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # Two voxels along x ([0, 3] and [3, 3.5]); unit extent in y and z.
    m = Mesh(
        structured=True, structured_coords=[[0, 3, 3.5], [0, 1], [0, 1]], mats=None
    )
    # Per-voxel source strengths for the two energy groups.
    m.src = NativeMeshTag(2, float)
    m.src[:] = [[2.0, 1.0], [9.0, 3.0]]
    e_bounds = np.array([0, 0.5, 1.0])
    # User-supplied bias values, one per (voxel, energy group).
    m.bias = NativeMeshTag(2, float)
    m.bias[:] = [[1.0, 2.0], [3.0, 3.0]]
    cell_fracs = np.zeros(
        2,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [(0, 11, 1.0, 0.0), (1, 11, 1.0, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "bias_tag_name": "bias",
        "e_bounds_tag_name": "e_bounds",
    }
    filename = "sampling_mesh.h5m"
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    sampler = Sampler(filename, tag_names, e_bounds, DEFAULT_USER)
    num_samples = 10000
    score = 1.0 / num_samples
    num_divs = 2
    tally = np.zeros(shape=(4))
    # For each (voxel, energy group) bin, check the hand-calculated birth
    # weight and accumulate the sampling frequency.
    for i in range(num_samples):
        s = sampler.particle_birth(np.array([uniform(0, 1) for x in range(6)]))
        if s.x < 3:
            if s.e < 0.5:
                assert_almost_equal(s.w, 1.6)  # hand calcs
                tally[0] += score
            else:
                assert_almost_equal(s.w, 0.4)  # hand calcs
                tally[1] += score
        else:
            if s.e < 0.5:
                assert_almost_equal(s.w, 2.4)  # hand calcs
                tally[2] += score
            else:
                assert_almost_equal(s.w, 0.8)  # hand calcs
                tally[3] += score
    expected_tally = [0.25, 0.5, 0.125, 0.125]  # hand calcs
    for a, b in zip(tally, expected_tally):
        assert abs(a - b) / b < 0.25
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_bias_spatial():
    """This test tests a user-specified biasing scheme for which only 1
    bias group is supplied for a source distribution containing two energy
    groups. This bias group is applied to both energy groups. In this test,
    the user-supplied bias distribution that was chosen corresponds to
    uniform sampling, so that results can be checked against Case 1 in the
    theory manual.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # Two voxels along x ([0, 3] and [3, 3.5]); unit extent in y and z.
    m = Mesh(
        structured=True, structured_coords=[[0, 3, 3.5], [0, 1], [0, 1]], mats=None
    )
    m.src = NativeMeshTag(2, float)
    m.src[:] = [[2.0, 1.0], [9.0, 3.0]]
    # Spatial-only bias (length 1): equal bias in both voxels, i.e. uniform.
    m.bias = NativeMeshTag(1, float)
    m.bias[:] = [1, 1]
    e_bounds = np.array([0, 0.5, 1.0])
    filename = "sampling_mesh.h5m"
    cell_fracs = np.zeros(
        2,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = [(0, 11, 1.0, 0.0), (1, 11, 1.0, 0.0)]
    m.tag_cell_fracs(cell_fracs)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "bias_tag_name": "bias",
        "e_bounds_tag_name": "e_bounds",
    }
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    sampler = Sampler(filename, tag_names, e_bounds, DEFAULT_USER)
    num_samples = 10000
    score = 1.0 / num_samples
    num_divs = 2
    num_e = 2
    spatial_tally = np.zeros(shape=(num_divs, num_divs, num_divs))
    e_tally = np.zeros(shape=(4))  # number of phase space groups
    for i in range(num_samples):
        s = sampler.particle_birth(np.array([uniform(0, 1) for x in range(6)]))
        # Same expected weights as the uniform scheme (Case 1).
        if s.x < 3.0:
            assert_almost_equal(s.w, 0.7)  # hand calcs
        else:
            assert_almost_equal(s.w, 2.8)  # hand calcs
        spatial_tally[
            int(s.x * num_divs / 3.5),
            int(s.y * num_divs / 1.0),
            int(s.z * num_divs / 1.0),
        ] += score
        # Tally into the 4 (voxel, energy group) phase-space bins.
        if s.x < 3 and s.e < 0.5:
            e_tally[0] += score
        elif s.x < 3 and s.e > 0.5:
            e_tally[1] += score
        if s.x > 3 and s.e < 0.5:
            e_tally[2] += score
        if s.x > 3 and s.e > 0.5:
            e_tally[3] += score
    # Uniform-equivalent bias: each spatial half-space holds ~half the score.
    for i in range(0, 3):
        for j in range(0, 2):
            halfspace_sum = np.sum(np.rollaxis(spatial_tally, i)[j, :, :])
            assert abs(halfspace_sum - 0.5) / 0.5 < 0.1
    expected_e_tally = [4.0 / 7, 2.0 / 7, 3.0 / 28, 1.0 / 28]  # hand calcs
    for i in range(4):
        assert abs(e_tally[i] - expected_e_tally[i]) / expected_e_tally[i] < 0.1
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_subvoxel_multiple_hex_bias_1():
    """This test tests that particle are sampled from a biased source
    defined on two voxels (2*2 = 4 sub-voxels) with the biased tag length of 1.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # mesh contains two voxels. 2 * 1 * 1 = 2
    m = Mesh(
        structured=True, structured_coords=[[0, 0.5, 1], [0, 1], [0, 1]], mats=None
    )
    # max_num_cells = 2. 4 sub-voxels
    cell_fracs = np.zeros(
        4,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    # Cell numbering encodes position: tens digit = voxel+1, ones = cell+1.
    cell_fracs[:] = [
        (0, 11, 0.5, 0.0),
        (0, 12, 0.5, 0.0),
        (1, 21, 0.5, 0.0),
        (1, 22, 0.5, 0.0),
    ]
    m.tag_cell_fracs(cell_fracs)
    # the photon emitting rate of 4 sub-voxels is 0.1, 0.2, 0.3, 0.4
    m.src = NativeMeshTag(4, float)
    m.src[:] = np.empty(shape=(2, 4), dtype=float)
    m.src[:] = [[0.05, 0.05, 0.10, 0.10], [0.15, 0.15, 0.20, 0.20]]
    e_bounds = np.array([0, 0.5, 1.0])
    # bias, tag size = 1 (one spatial bias value per voxel)
    m.bias = NativeMeshTag(1, float)
    m.bias[:] = [[0.4], [0.6]]
    filename = "sampling_mesh.h5m"
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "bias_tag_name": "bias",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_USER)
    num_samples = 50000
    score = 1.0 / num_samples
    num_divs = 2
    # tally shape (v, c, e)
    tally = np.zeros(shape=(num_divs, num_divs, num_divs))
    for i in range(num_samples):
        s = sampler.particle_birth([uniform(0, 1) for x in range(6)])
        # Decode voxel/cell indices back out of the cell number.
        vid = s.cell_list[0] // 10 - 1
        cid = s.cell_list[0] % 10 - 1
        eid = 0 if s.e < 0.5 else 1
        # check the cell_number
        if s.x < 0.5:
            assert s.cell_list[0] in [11, 12]
        if s.x > 0.5:
            assert s.cell_list[0] in [21, 22]
        # check the weight of each subvoxel (hand-calculated values)
        if vid == 0:
            assert abs(s.w - 0.746) / 0.746 < 0.05
        if vid == 1:
            assert abs(s.w - 1.163) / 1.163 < 0.05
        # count the tally
        tally[vid, cid, eid] += score
    # check the real sample rate
    # exp_tally calculated by hand
    exp_tally = np.zeros(shape=(2, 2, 2))
    exp_tally[:] = [[[0.067, 0.067], [0.133, 0.133]], [[0.129, 0.129], [0.171, 0.171]]]
    for v in range(2):
        for c in range(2):
            for e in range(2):
                assert (
                    abs(tally[v, c, e] - exp_tally[v, c, e]) / exp_tally[v, c, e] < 0.05
                )
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_subvoxel_multiple_hex_bias_max_num_cells_num_e_groups():
    """This test tests that particle are sampled from a biased source
    defined on two voxels (2*2 = 4 sub-voxels) with the biased tag length
    of max_num_cells*num_e_group.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # mesh contains two voxels. 2 * 1 * 1 = 2
    m = Mesh(
        structured=True, structured_coords=[[0, 0.5, 1], [0, 1], [0, 1]], mats=None
    )
    # max_num_cells = 2. 4 sub-voxels
    cell_fracs = np.zeros(
        4,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    # Cell numbering encodes position: tens digit = voxel+1, ones = cell+1.
    cell_fracs[:] = [
        (0, 11, 0.5, 0.0),
        (0, 12, 0.5, 0.0),
        (1, 21, 0.5, 0.0),
        (1, 22, 0.5, 0.0),
    ]
    m.tag_cell_fracs(cell_fracs)
    # Flat source: every (sub-voxel, energy group) pair has the same strength.
    m.src = NativeMeshTag(4, float)
    m.src[:] = np.empty(shape=(2, 4), dtype=float)
    m.src[:] = [[0.125, 0.125, 0.125, 0.125], [0.125, 0.125, 0.125, 0.125]]
    e_bounds = np.array([0, 0.5, 1.0])
    # bias tag, length 4 = max_num_cells * num_e_groups
    m.bias = NativeMeshTag(4, float)
    m.bias[:] = [[0.125, 0.125, 0.1, 0.15], [0.1, 0.1, 0.15, 0.15]]
    filename = "sampling_mesh.h5m"
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "bias_tag_name": "bias",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_USER)
    num_samples = 50000
    score = 1.0 / num_samples
    num_divs = 2
    # tally shape (v, c, e)
    tally = np.zeros(shape=(num_divs, num_divs, num_divs))
    # Expected weights per (voxel, cell, energy) bin, hand calculated.
    exp_wgt = np.zeros(shape=(num_divs, num_divs, num_divs))
    exp_wgt[:] = [[[1.0, 1.0], [1.25, 0.83]], [[1.25, 1.25], [0.83, 0.83]]]
    for i in range(num_samples):
        s = sampler.particle_birth([uniform(0, 1) for x in range(6)])
        # Decode voxel/cell indices back out of the cell number.
        vid = s.cell_list[0] // 10 - 1
        cid = s.cell_list[0] % 10 - 1
        eid = 0 if s.e < 0.5 else 1
        # check the cell_number
        if s.x < 0.5:
            assert s.cell_list[0] in [11, 12]
        if s.x > 0.5:
            assert s.cell_list[0] in [21, 22]
        # check the weight of each subvoxel
        assert abs(s.w - exp_wgt[vid, cid, eid]) / exp_wgt[vid, cid, eid] < 0.05
        # count the tally
        tally[vid, cid, eid] += score
    # check the real sample rate
    exp_tally = np.zeros(shape=(2, 2, 2))
    exp_tally[:] = [[[0.125, 0.125], [0.100, 0.150]], [[0.100, 0.100], [0.150, 0.150]]]
    for v in range(2):
        for c in range(2):
            for e in range(2):
                assert (
                    abs(tally[v, c, e] - exp_tally[v, c, e]) / exp_tally[v, c, e] < 0.05
                )
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def test_subvoxel_multiple_hex_bias_e_groups():
    """This test tests that particle are sampled from a biased source
    defined on two voxels (2*2 = 4 sub-voxels) with the biased tag length
    of energy groups.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    # mesh contains two voxels. 2 * 1 * 1 = 2
    m = Mesh(
        structured=True, structured_coords=[[0, 0.5, 1], [0, 1], [0, 1]], mats=None
    )
    # max_num_cells = 2. 4 sub-voxels
    cell_fracs = np.zeros(
        4,
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    # Cell numbering encodes position: tens digit = voxel+1, ones = cell+1.
    cell_fracs[:] = [
        (0, 11, 0.5, 0.0),
        (0, 12, 0.5, 0.0),
        (1, 21, 0.5, 0.0),
        (1, 22, 0.5, 0.0),
    ]
    m.tag_cell_fracs(cell_fracs)
    # the photon emitting rate of 4 sub-voxels is 0.1, 0.2, 0.3, 0.4
    m.src = NativeMeshTag(4, float)
    m.src[:] = np.empty(shape=(2, 4), dtype=float)
    m.src[:] = [[0.05, 0.05, 0.10, 0.10], [0.15, 0.15, 0.20, 0.20]]
    e_bounds = np.array([0, 0.5, 1.0])
    # bias tag, length 2 = number of energy groups
    m.bias = NativeMeshTag(2, float)
    m.bias[:] = [[0.1, 0.3], [0.2, 0.4]]
    filename = "sampling_mesh.h5m"
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "bias_tag_name": "bias",
        "e_bounds_tag_name": "e_bounds",
    }
    sampler = Sampler(filename, tag_names, e_bounds, SUBVOXEL_USER)
    num_samples = 50000
    score = 1.0 / num_samples
    num_divs = 2
    # tally shape (v, c, e)
    tally = np.zeros(shape=(num_divs, num_divs, num_divs))
    for i in range(num_samples):
        s = sampler.particle_birth([uniform(0, 1) for x in range(6)])
        # Decode voxel/cell indices back out of the cell number.
        vid = s.cell_list[0] // 10 - 1
        cid = s.cell_list[0] % 10 - 1
        eid = 0 if s.e < 0.5 else 1
        # check the cell_number
        if s.x < 0.5:
            assert s.cell_list[0] in [11, 12]
        if s.x > 0.5:
            assert s.cell_list[0] in [21, 22]
        # check the weight of each subvoxel (hand-calculated values)
        if vid == 0 and eid == 0:
            assert abs(s.w - 1.5) / 1.5 < 0.05
        if vid == 0 and eid == 1:
            assert abs(s.w - 0.5) / 0.5 < 0.05
        if vid == 1 and eid == 0:
            assert abs(s.w - 1.75) / 1.75 < 0.05
        if vid == 1 and eid == 1:
            assert abs(s.w - 0.875) / 0.875 < 0.05
        # count the tally
        tally[vid, cid, eid] += score
    # check the real sample rate
    exp_tally = np.zeros(shape=(2, 2, 2))
    exp_tally[:] = [
        [[0.0333, 0.1000], [0.0667, 0.2000]],
        [[0.0857, 0.1714], [0.1143, 0.2286]],
    ]
    for v in range(2):
        for c in range(2):
            for e in range(2):
                assert (
                    abs(tally[v, c, e] - exp_tally[v, c, e]) / exp_tally[v, c, e] < 0.05
                )
def test_alias_table():
    """This tests that the AliasTable class produces samples in the ratios
    consistent with the supplied PDF.
    """
    # Fixed seed so the statistical tolerances below are reproducible.
    seed(1953)
    pdf = np.array([0.1, 0.2, 0.7])
    at = AliasTable(pdf)
    num_samples = 50000
    score = 1.0 / num_samples
    tally = np.zeros(shape=(3))
    for i in range(num_samples):
        # sample_pdf takes two random numbers: bin selection and alias flip.
        s = at.sample_pdf(uniform(0, 1), uniform(0, 1))
        tally[s] += score
    # Each bin's observed frequency must match its PDF value within 5%.
    for i in range(0, 3):
        assert abs(tally[i] - pdf[i]) / pdf[i] < 0.05
def point_in_tet(t, p):
    """Return True if point <p> lies within tetrahedron <t>, using the
    determinant-sign method described here:
    http://steve.hollasch.net/cgindex/geometry/ptintet.html

    Five 4x4 determinants are formed: one from the four homogenized
    vertices, and one per vertex with that vertex replaced by <p>. The
    point is inside (or on the boundary) iff all five determinants share
    the same sign.
    """
    # Homogenize: row i is vertex i with a trailing 1.
    base_rows = [[t[i][0], t[i][1], t[i][2], 1] for i in range(4)]
    p_row = [p[0], p[1], p[2], 1]
    # D0 from the vertices alone, then D1..D4 with row i swapped for p.
    dets = [np.linalg.det(np.array(base_rows))]
    for swap in range(4):
        rows = [p_row if r == swap else base_rows[r] for r in range(4)]
        dets.append(np.linalg.det(np.array(rows)))
    # Same orientation for every determinant means the point is inside.
    return all(d >= 0 for d in dets) or all(d < 0 for d in dets)
def test_template_examples():
    """
    An example of using source_sampling test template to do the test.

    Runs _source_sampling_test_template over every sampling mode, for 1 and
    2 energy groups, across four mesh layouts: 1 voxel/1 sub-voxel,
    1 voxel/2 sub-voxels, 2 voxels/2 sub-voxels, 2 voxels/4 sub-voxels.
    For the USER modes, every legal bias-tag length is exercised.
    """
    # DEFAULT and SUBVOXEL
    # NOTE(review): the literal mode checks below (`mode in (0, 1, 2)` /
    # `(3, 4, 5)`) assume the DEFAULT_* constants are 0-2 and the SUBVOXEL_*
    # constants are 3-5 — confirm against the constants' definitions.
    for mode in (
        DEFAULT_ANALOG,
        DEFAULT_UNIFORM,
        DEFAULT_USER,
        SUBVOXEL_ANALOG,
        SUBVOXEL_UNIFORM,
        SUBVOXEL_USER,
    ):
        for num_e_groups in (1, 2):
            # num_bias_groups could be:
            # 1, num_e_groups, and max_num_cells*num_e_groups
            # test case: 1 voxel, 1 subvoxel
            cell_fracs_list = [(0, 1, 1.0, 0.0)]
            src_tag = [[1.0] * num_e_groups]
            if mode == DEFAULT_USER or mode == SUBVOXEL_USER:
                for num_bias_groups in (1, num_e_groups):
                    bias_tag = [[1.0] * num_bias_groups]
                    _source_sampling_test_template(
                        mode, cell_fracs_list, src_tag, bias_tag
                    )
            else:
                _source_sampling_test_template(mode, cell_fracs_list, src_tag)
            # test case: 1 voxel, 2 subvoxels
            # create src and cell_fracs tag data
            if mode in (0, 1, 2):
                src_tag = [[1.0] * num_e_groups]
                cell_fracs_list = [(0, 1, 1.0, 0.0)]
            elif mode in (3, 4, 5):
                # SUBVOXEL modes carry one src entry per (sub-voxel, group).
                src_tag = [[1.0, 1.0] * num_e_groups]
                cell_fracs_list = [(0, 1, 0.5, 0.0), (0, 2, 0.5, 0.0)]
            if mode == DEFAULT_USER:
                for num_bias_groups in (1, num_e_groups):
                    bias_tag = [[1.0] * num_bias_groups]
                    _source_sampling_test_template(
                        mode, cell_fracs_list, src_tag, bias_tag
                    )
            elif mode == SUBVOXEL_USER:
                # SUBVOXEL_USER also allows max_num_cells*num_e_groups bias.
                for num_bias_groups in (1, num_e_groups, 2 * num_e_groups):
                    bias_tag = [[1.0] * num_bias_groups]
                    _source_sampling_test_template(
                        mode, cell_fracs_list, src_tag, bias_tag
                    )
            else:
                _source_sampling_test_template(mode, cell_fracs_list, src_tag)
            # test case: 2 voxel, 2 subvoxels
            cell_fracs_list = [(0, 1, 1.0, 0.0), (1, 2, 1.0, 0.0)]
            src_tag = [[1.0] * num_e_groups, [1.0] * num_e_groups]
            if mode == DEFAULT_USER or mode == SUBVOXEL_USER:
                for num_bias_groups in (1, num_e_groups):
                    bias_tag = [[1.0] * num_bias_groups, [1.0] * num_bias_groups]
                    _source_sampling_test_template(
                        mode, cell_fracs_list, src_tag, bias_tag
                    )
            else:
                _source_sampling_test_template(mode, cell_fracs_list, src_tag)
            # test case: 2 voxel, 4 subvoxels
            # create src and cell_fracs tag data
            if mode in (0, 1, 2):
                src_tag = [[1.0] * num_e_groups, [1.0] * num_e_groups]
                cell_fracs_list = [(0, 1, 1.0, 0.0), (1, 2, 1.0, 0.0)]
            elif mode in (3, 4, 5):
                src_tag = [[1.0, 1.0] * num_e_groups, [1.0, 1.0] * num_e_groups]
                cell_fracs_list = [
                    (0, 1, 0.5, 0.0),
                    (0, 2, 0.5, 0.0),
                    (1, 3, 0.5, 0.0),
                    (1, 4, 0.5, 0.0),
                ]
            if mode == DEFAULT_USER:
                for num_bias_groups in (1, num_e_groups):
                    bias_tag = [[1.0] * num_bias_groups, [1.0] * num_bias_groups]
                    _source_sampling_test_template(
                        mode, cell_fracs_list, src_tag, bias_tag
                    )
            elif mode == SUBVOXEL_USER:
                for num_bias_groups in (1, num_e_groups, 2 * num_e_groups):
                    bias_tag = [[1.0] * num_bias_groups, [1.0] * num_bias_groups]
                    _source_sampling_test_template(
                        mode, cell_fracs_list, src_tag, bias_tag
                    )
            else:
                _source_sampling_test_template(mode, cell_fracs_list, src_tag)
def _get_num_ve_sve_and_max_num_cells(cell_fracs):
"""
Calculate the num_ve, num_sve and max_num_cells
Parameters
----------
cell_fracs : structured array, optional
A sorted, one dimensional array,
each entry containing the following fields:
:idx: int
The volume element index.
:cell: int
The geometry cell number.
:vol_frac: float
The volume fraction of the cell withing the mesh ve.
:rel_error: float
The relative error associated with the volume fraction.
Returns
-------
num_ve : int
Number of the total voxels
num_sve : int
Number of the total subvoxels, eqaul to or greater than num_ve
max_num_cells : int
Max number of cells (subvoxels) in a voxel
"""
num_sve = len(cell_fracs)
num_ve = len(set(cell_fracs["idx"]))
max_num_cells = -1
for i in range(num_sve):
max_num_cells = max(max_num_cells, len(cell_fracs[cell_fracs["idx"] == i]))
return num_ve, num_sve, max_num_cells
def _create_mesh_via_num_ve(num_ve):
    """
    This function creates mesh from number of voxels

    Parameters
    ----------
    num_ve : int
        Number of voxels

    Returns
    -------
    mesh. MOAB mesh.
    """
    # num_ve equal-width slabs along x spanning [0, 1]; unit y and z extent.
    x_bounds = [i * 1.0 / (num_ve) for i in range(num_ve + 1)]
    return Mesh(
        structured=True, structured_coords=[x_bounds, [0, 1], [0, 1]], mats=None
    )
def _cal_pdf_and_biased_pdf(cell_fracs, src_tag, bias_tag=None):
    """
    This function calculates the normalized pdf and biased pdf of a source.

    Parameters
    ----------
    cell_fracs : structured array
        A sorted, one dimensional array,
        each entry containing the following fields:

        :idx: int
            The volume element index.
        :cell: int
            The geometry cell number.
        :vol_frac: float
            The volume fraction of the cell withing the mesh ve.
        :rel_error: float
            The relative error associated with the volume fraction.
    src_tag : numpy array
        An one or two dimentional array contains data of the source tag.
    bias_tag : numpy array, optional
        An one or two dimentional array contains data of bias tag. When
        None, the UNIFORM scheme's implicit bias (proportional to each
        voxel's own source distribution) is used.

    Returns
    -------
    pdf : numpy array
        A three dimentional numpy array, shape=(num_ve, max_num_cells,
        num_e_groups)
    biased_pdf : numpy array
        A three dimentional numpy array, shape=(num_ve, max_num_cells,
        num_e_groups)

    Raises
    ------
    ValueError
        If the bias tag length matches none of the legal sizes
        (1, num_e_groups, max_num_cells * num_e_groups).

    Notes
    -----
    NOTE(review): the flat indexing ``cell_fracs[vid * max_num_cells + svid]``
    assumes every voxel owns exactly ``max_num_cells`` entries in
    ``cell_fracs`` — confirm for meshes with varying cells per voxel.
    """
    num_ve, num_sve, max_num_cells = _get_num_ve_sve_and_max_num_cells(cell_fracs)
    num_e_groups = len(src_tag[0]) // max_num_cells
    # un-normalized pdf: source strength weighted by sub-voxel volume fraction
    pdf = np.zeros(shape=(num_ve, max_num_cells, num_e_groups), dtype=np.float64)
    for vid in range(num_ve):
        for svid in range(max_num_cells):
            for eid in range(num_e_groups):
                pdf[vid, svid, eid] = (
                    src_tag[vid][svid * num_e_groups + eid]
                    * cell_fracs[vid * max_num_cells + svid]["vol_frac"]
                )
    # normalize
    pdf = pdf / pdf.sum()
    # calculate biased_pdf
    biased_pdf = np.zeros(
        shape=(num_ve, max_num_cells, num_e_groups), dtype=np.float64
    )
    # set up bias_array to proper value
    # BUG FIX: was `bias_tag == None`, which performs an elementwise
    # comparison (and fails truth-testing) when bias_tag is a numpy array.
    if bias_tag is None:
        # UNIFORM mode, set default bias_group and bias_array:
        # each voxel's bias follows its own normalized source distribution
        num_bias_groups = 1
        bias_array = np.empty(
            shape=(num_ve, max_num_cells, num_e_groups), dtype=np.float64
        )
        for vid in range(num_ve):
            for svid in range(max_num_cells):
                for eid in range(num_e_groups):
                    bias_array[vid, svid, eid] = (
                        src_tag[vid][svid * num_e_groups + eid]
                        / np.array(src_tag[vid]).sum()
                    )
    else:
        # USER mode, set bias_array according to bias_tag
        num_bias_groups = len(bias_tag[0])
        bias_array = np.zeros(
            shape=(num_ve, max_num_cells, num_e_groups), dtype=np.float64
        )
        for vid in range(num_ve):
            for svid in range(max_num_cells):
                for eid in range(num_e_groups):
                    if num_bias_groups == 1:
                        # one spatial bias value per voxel
                        bias_array[vid, svid, eid] = bias_tag[vid][0]
                    elif num_bias_groups == num_e_groups:
                        # one bias value per energy group
                        bias_array[vid, svid, eid] = bias_tag[vid][eid]
                    elif num_bias_groups == max_num_cells * num_e_groups:
                        # one bias value per (sub-voxel, energy group) pair
                        bias_array[vid, svid, eid] = bias_tag[vid][
                            svid * num_e_groups + eid
                        ]
                    else:
                        raise ValueError("Wrong bias_tag length")
    # calculate biased_pdf: bias value weighted by sub-voxel volume fraction
    if num_bias_groups == 1:
        for vid in range(num_ve):
            for svid in range(max_num_cells):
                current_ve = cell_fracs[cell_fracs["idx"] == vid]
                biased_pdf[vid, svid, :] = (
                    bias_array[vid, svid, :] * current_ve[svid]["vol_frac"]
                )
    elif num_bias_groups == num_e_groups:
        for vid in range(num_ve):
            for eid in range(num_e_groups):
                for svid in range(max_num_cells):
                    current_ve = cell_fracs[cell_fracs["idx"] == vid]
                    biased_pdf[vid, svid, eid] = (
                        bias_array[vid, svid, eid] * current_ve[svid]["vol_frac"]
                    )
    elif num_bias_groups == max_num_cells * num_e_groups:
        for vid in range(num_ve):
            current_ve = cell_fracs[cell_fracs["idx"] == vid]
            for svid in range(max_num_cells):
                for eid in range(num_e_groups):
                    # BUG FIX: originally used cell_fracs[vid]["vol_frac"],
                    # which indexes the flat sve array by voxel id and so
                    # reads the wrong entry whenever a voxel has more than
                    # one sub-voxel; use the current sub-voxel's fraction.
                    biased_pdf[vid, svid, eid] = (
                        bias_array[vid, svid, eid] * current_ve[svid]["vol_frac"]
                    )
    # normalize biased_pdf
    biased_pdf = np.divide(biased_pdf, biased_pdf.sum())
    return pdf, biased_pdf
def _cal_exp_w_c(s, mode, cell_fracs, src_tag, bias_tag):
    """
    This function calculates the expected weight and cell_number
    for a given particle (according to its x coordinate).

    Parameters
    ----------
    s : SourceParticle
        The given particle
    mode : int
        Mode of the source_sampling
    cell_fracs : structured array
        A sorted, one dimensional array,
        each entry containing the following fields:

        :idx: int
            The volume element index.
        :cell: int
            The geometry cell number.
        :vol_frac: float
            The volume fraction of the cell withing the mesh ve.
        :rel_error: float
            The relative error associated with the volume fraction.
    src_tag : numpy array
        An one or two dimentional array contains data of the source tag.
    bias_tag : numpy array, optional
        An one or two dimentional array contains data of bias tag

    Returns
    -------
    exp_w : float
        Expected weight of the source particle
    exp_c : set of available cell numbers
        Expected cell number of the source particle

    Raises
    ------
    ValueError
        If the particle's position or energy falls outside the [0, 1]
        phase-space assumed by the test meshes.
    """
    num_ve, num_sve, max_num_cells = _get_num_ve_sve_and_max_num_cells(cell_fracs)
    # locate the voxel (vid): voxels are equal-width slabs spanning [0, 1]
    # along x
    x_bounds = [v * 1.0 / (num_ve) for v in range(num_ve + 1)]
    vid = -1
    for i in range(num_ve):
        if x_bounds[i] <= s.x <= x_bounds[i + 1]:
            vid = i
            break
    if vid == -1:
        raise ValueError(
            "x coordinate of particle not in (0, 1), s.x = {0}".format(str(s.x))
        )
    # calculate svid
    # get number of cells/subvoxels of current voxel
    current_cell_fracs = cell_fracs[cell_fracs["idx"] == vid]
    num_cells = len(current_cell_fracs)
    x_bounds = np.array([0.0] * (num_cells + 1))
    # the x_bounds of the vid start from 1.0/num_ve*vid; sub-voxels are
    # stacked along x proportionally to their volume fractions
    x_bounds[0] = 1.0 / num_ve * vid
    for svid in range(num_cells):
        x_bounds[svid + 1] = (
            x_bounds[svid] + 1.0 / num_ve * current_cell_fracs[svid]["vol_frac"]
        )
    svid = -1
    for i in range(num_cells):
        if x_bounds[i] <= s.x <= x_bounds[i + 1]:
            svid = i
            break
    if svid == -1:
        raise ValueError("x coordinate not in the voxel, s.x = {0}".format(str(s.x)))
    # any cell of the current voxel is an acceptable cell_number
    exp_c = set(list(current_cell_fracs["cell"]))
    # calculate eid; energy groups divide [0, 1] evenly
    if mode in (0, 1, 2):
        # DEFAULT modes: one src entry per energy group
        num_e_groups = len(src_tag[0])
    elif mode in (3, 4, 5):
        # SUBVOXEL modes: src entries are (sub-voxel, energy group) pairs
        num_e_groups = len(src_tag[0]) // max_num_cells
    e_bounds = np.array([i * 1.0 / num_e_groups for i in range(num_e_groups + 1)])
    eid = -1
    for i in range(num_e_groups):
        if e_bounds[i] <= s.e <= e_bounds[i + 1]:
            eid = i
            break
    if eid == -1:
        # BUG FIX: the original message lacked the "{0}" placeholder, so
        # the offending energy value was silently dropped from the text.
        raise ValueError("energy not in (0, 1), s.e = {0}".format(str(s.e)))
    # calculate exp_w, weight is determined by mode, vid, svid and energy
    if mode in (0, 3):
        # ANALOG modes: weights are always unity
        exp_w = 1.0
    else:
        # UNIFORM (1, 4) and USER (2, 5) modes: weight is the ratio of the
        # analog pdf to the biased pdf in this phase-space bin.  (The two
        # cases shared identical code in the original; merged here.)
        pdf, biased_pdf = _cal_pdf_and_biased_pdf(cell_fracs, src_tag, bias_tag)
        exp_w = pdf[vid, svid, eid] / biased_pdf[vid, svid, eid]
    return exp_w, exp_c
def _get_p_y_z_halfspace(particles):
"""
This function calcualtes the probabilities of y and z half space
for a given set of particles
Parameters
----------
particles : list
List of SourceParticle
Returns
-------
p_y_halfspace : float
The probability of y half space
p_z_halfspace : float
The probability of z half space
"""
y_count, z_count = 0, 0
for s in particles:
if s.y < 0.5:
y_count = y_count + 1
if s.z < 0.5:
z_count = z_count + 1
p_y_halfspace = float(y_count) / len(particles)
p_z_halfspace = float(z_count) / len(particles)
return p_y_halfspace, p_z_halfspace
def _get_x_dis(particles, num_ve):
"""
This function calcualtes the particle distribution along x direction
for a given set of particles
Parameters
----------
particles : list
List of SourceParticle
num_ve : int
Number of voxels
Returns
-------
x_dis : one dimentional numpy array
The particle direction along x direction
"""
x_bounds = [v * 1.0 / (num_ve) for v in range(num_ve + 1)]
x_dis = np.array([0.0] * num_ve)
for i in range(num_ve):
for s in particles:
if x_bounds[i] <= s.x <= x_bounds[i + 1]:
x_dis[i] = x_dis[i] + 1
x_dis = np.divide(x_dis, len(particles))
return x_dis
def _get_x_dis_exp(mode, cell_fracs, src_tag, bias_tag=None):
    """
    Calculate the expected particle distribution along the x direction.

    Parameters
    ----------
    mode : int
        Mode of the source_sampling
    cell_fracs : structured array
        A sorted, one dimensional array,
        each entry containing the following fields:

            :idx: int
                The volume element index.
            :cell: int
                The geometry cell number.
            :vol_frac: float
                The volume fraction of the cell within the mesh ve.
            :rel_error: float
                The relative error associated with the volume fraction.
    src_tag : numpy array
        A one or two dimensional array containing data of the source tag.
    bias_tag : numpy array, optional
        A one or two dimensional array containing data of the bias tag.
        Required when mode is 2 or 5 (USER bias mode).

    Returns
    -------
    x_dis_exp : one dimensional numpy array
        The expected particle distribution along the x direction

    Raises
    ------
    ValueError
        If mode is 2 or 5 but bias_tag is not provided.
    """
    num_ve, num_sve, max_num_cells = _get_num_ve_sve_and_max_num_cells(cell_fracs)
    # number of energy groups depends on whether the tag is voxel (modes 0-2)
    # or subvoxel (modes 3-5) based
    if mode in (0, 1, 2):
        num_e_groups = len(src_tag[0])
    elif mode in (3, 4, 5):
        num_e_groups = len(src_tag[0]) // max_num_cells
    x_dis_exp = np.array([0.0] * num_ve)
    if mode in (0, 3):
        # ANALOG, particles distribute according to the src_tag
        for vid in range(num_ve):
            current_ve = cell_fracs[cell_fracs["idx"] == vid]
            for svid in range(len(current_ve)):
                x_dis_exp[vid] += (
                    current_ve[svid]["vol_frac"]
                    * np.array(
                        src_tag[vid][svid * num_e_groups : (svid + 1) * num_e_groups]
                    ).sum()
                )
    elif mode in (1, 4):
        # UNIFORM, particles distribute uniformly in the x direction
        x_dis_exp = np.array([1.0 / num_ve] * num_ve)
    elif mode in (2, 5):
        # Use "is None": comparing a numpy array to None with "==" performs
        # an element-wise comparison, and truth-testing the resulting array
        # raises "truth value is ambiguous".
        if bias_tag is None:
            raise ValueError(
                "bias_tag must be provided when mode is {0}".format(str(mode))
            )
        # USER, particles distribute according to the bias_tag
        for vid in range(num_ve):
            current_ve = cell_fracs[cell_fracs["idx"] == vid]
            for svid in range(len(current_ve)):
                x_dis_exp[vid] += (
                    current_ve[svid]["vol_frac"]
                    * np.array(
                        bias_tag[vid][svid * num_e_groups : (svid + 1) * num_e_groups]
                    ).sum()
                )
    # normalize x_dis_exp
    x_dis_exp = np.divide(x_dis_exp, x_dis_exp.sum())
    return x_dis_exp
def _get_e_dis(particles, num_e_groups):
"""
This function calcualtes the particle distribution along energy
for a given set of particles
Parameters
----------
particles : list
List of SourceParticle
num_e_groups : int
Number of energy groups
Returns
-------
e_dis : one dimentional numpy array
The particle direction along energy
"""
e_bounds = [e * 1.0 / (num_e_groups) for e in range(num_e_groups + 1)]
e_dis = np.array([0.0] * num_e_groups)
for i in range(num_e_groups):
for s in particles:
if e_bounds[i] <= s.e <= e_bounds[i + 1]:
e_dis[i] = e_dis[i] + 1
e_dis = np.divide(e_dis, len(particles))
return e_dis
def _get_e_dis_exp(mode, cell_fracs, src_tag, bias_tag=None):
    """
    Calculate the expected particle distribution over energy groups.

    Parameters
    ----------
    mode : int
        Mode of the source_sampling
    cell_fracs : structured array
        A sorted, one dimensional array,
        each entry containing the following fields:

            :idx: int
                The volume element index.
            :cell: int
                The geometry cell number.
            :vol_frac: float
                The volume fraction of the cell within the mesh ve.
            :rel_error: float
                The relative error associated with the volume fraction.
    src_tag : numpy array
        A one or two dimensional array containing data of the source tag.
    bias_tag : numpy array, optional
        A one or two dimensional array containing data of the bias tag.
        Required when mode is 2 or 5 (USER bias mode).

    Returns
    -------
    e_dis_exp : one dimensional numpy array
        The expected particle distribution over energy groups

    Raises
    ------
    ValueError
        If mode is 2 or 5 but bias_tag is not provided.
    """
    # input check
    # Use "is None": comparing a numpy array to None with "==" performs an
    # element-wise comparison, and truth-testing the resulting array raises
    # "truth value is ambiguous".
    if mode in (2, 5) and bias_tag is None:
        raise ValueError("bias_tag must be provided when mode is {0}".format(str(mode)))
    num_ve, num_sve, max_num_cells = _get_num_ve_sve_and_max_num_cells(cell_fracs)
    # number of energy groups depends on whether the tag is voxel (modes 0-2)
    # or subvoxel (modes 3-5) based
    if mode in (0, 1, 2):
        num_e_groups = len(src_tag[0])
    elif mode in (3, 4, 5):
        num_e_groups = len(src_tag[0]) // max_num_cells
    e_dis_exp = np.array([0.0] * num_e_groups)
    if mode in (0, 1, 3, 4) or (mode in (2, 5) and len(bias_tag[0]) == 1):
        # when mode is ANALOG or UNIFORM, or mode is USER but num_bias_groups
        # is 1, particles distribute according to the src_tag
        for vid in range(num_ve):
            current_ve = cell_fracs[cell_fracs["idx"] == vid]
            for svid in range(len(current_ve)):
                for eid in range(num_e_groups):
                    e_dis_exp[eid] += (
                        current_ve[svid]["vol_frac"]
                        * src_tag[vid][svid * num_e_groups + eid]
                    )
    elif mode == 2 or (mode == 5 and len(bias_tag[0]) == num_e_groups):
        # energy is biased according to the bias_tag
        for vid in range(num_ve):
            current_ve = cell_fracs[cell_fracs["idx"] == vid]
            for svid in range(len(current_ve)):
                for eid in range(num_e_groups):
                    e_dis_exp[eid] += current_ve[svid]["vol_frac"] * bias_tag[vid][eid]
    else:
        # subvoxel USER mode with a per-subvoxel energy bias
        for vid in range(num_ve):
            current_ve = cell_fracs[cell_fracs["idx"] == vid]
            for svid in range(len(current_ve)):
                for eid in range(num_e_groups):
                    e_dis_exp[eid] += (
                        current_ve[svid]["vol_frac"]
                        * bias_tag[vid][svid * num_e_groups + eid]
                    )
    # normalize e_dis_exp
    e_dis_exp = np.divide(e_dis_exp, e_dis_exp.sum())
    return e_dis_exp
@with_setup(None, try_rm_file("sampling_mesh.h5m"))
def _source_sampling_test_template(mode, cell_fracs_list, src_tag, bias_tag=None):
    """
    This function serves as a template for all source_sampling test cases.
    It constructs a Sampler from the input parameters,
    then performs a standardized sampling and tally,
    and finally compares the tallied results with the expected answers.

    Assumptions:
        * Use unit cube for all the meshes
        * Use structured meshes for all the tests
        * filename will always be: "sampling_mesh.h5m"
        * distribution changes only in the X direction
        * uniform distribution in the Y and Z directions
        * cell_number always equal to the index of sve + 1, no void cell
        * voxels have the same volume
          For example:
              cell_fracs = [(0, 1, 0.4, 0.0),
                            (0, 2, 0.6, 0.0),
                            (1, 3, 1.0, 0.0),
                            (2, 4, 1.0, 0.0), ...]

              voxel idx        v0           v1           v2
                       |------------|------------|------------|--- y
                       |     |      |            |            |  ^ z
              subvoxel | sve0| sve1 |    sve2    |    sve3    |  /
                       |     |      |            |            | /
                       |------------|------------|------------|--- ----> x
              cell_number c1     c2       c3           c4
        * Energy has only two options:
            - [0.0, 1.0]
            - [0.0, 0.5, 1.0]
        * Voxel number of meshes could be:
            - 1 voxel 1 subvoxel -> Single voxel single subvoxel
            - 1 voxel 2 subvoxel -> Single voxel multiple subvoxel
            - 2 voxel 2 subvoxel -> Multiple voxel multiple subvoxel
            - 2 voxel 4 subvoxel -> Multiple voxel multiple subvoxel

    Under these assumptions:
        * Mesh could be derived from cell_fracs
        * e_bounds could be derived from src_tag
        * construct_paras contain:
            - mode
            - cell_fracs
            - src_tag
            - bias_tag (optional, required for bias_mode == USER)

    Check items:
        * weight for each particle
        * cell_number for each particle
        * position distribution
        * energy distribution

    Parameters
    ----------
    mode : int
        Mode of the source sampling, could be 0, 1, 2, 3, 4 or 5
    cell_fracs_list : numpy array
        A one dimensional numpy array used to construct cell_fracs,
        Element: (idx, cell, vol_frac, rel_error)
    src_tag : numpy array
        A one or two dimensional array containing data of the source tag.
    bias_tag : numpy array, optional
        A one or two dimensional array containing data of the bias tag

    Returns
    -------
    None
    """
    sub_mode_r2s = (0, 1, 2)
    sub_mode_subvoxel = (3, 4, 5)
    avail_mode = (0, 1, 2, 3, 4, 5)
    # input check
    # check mode
    if mode not in avail_mode:
        raise ValueError("mode must be in (0, 1, 2, 3, 4, 5)")
    # set cell_fracs
    cell_fracs = np.zeros(
        len(cell_fracs_list),
        dtype=[
            ("idx", np.int64),
            ("cell", np.int64),
            ("vol_frac", np.float64),
            ("rel_error", np.float64),
        ],
    )
    cell_fracs[:] = cell_fracs_list
    # check bias_tag; use "is None" because "==" on a numpy array performs an
    # element-wise comparison and raises on truth testing
    if mode in (2, 5) and bias_tag is None:  # bias_mode == USER
        raise ValueError("bias_tag must be given when mode is {0}".format(str(mode)))
    # get number of voxels, max_num_cells
    num_ve, num_sve, max_num_cells = _get_num_ve_sve_and_max_num_cells(cell_fracs)
    # set up e_bounds
    if mode in (0, 1, 2):
        num_e_groups = len(src_tag[0])
    elif mode in (3, 4, 5):
        num_e_groups = len(src_tag[0]) // max_num_cells
    e_bounds = [i * 1.0 / num_e_groups for i in range(num_e_groups + 1)]
    e_bounds = np.array(e_bounds)
    # set up mesh
    m = _create_mesh_via_num_ve(num_ve)
    # set up src tag
    if mode in (0, 1, 2):
        m.src = NativeMeshTag(num_e_groups, float)
    elif mode in (3, 4, 5):
        m.src = NativeMeshTag(max_num_cells * num_e_groups, float)
    m.src[:] = src_tag
    # set up cell_number and cell_fracs tag
    m.tag_cell_fracs(cell_fracs)
    # set up bias tag
    if mode in (2, 5):
        bias_tag_length = len(bias_tag[0])
        m.bias = NativeMeshTag(bias_tag_length, float)
        m.bias[:] = bias_tag
    # set up tag_names
    tag_names = {
        "src_tag_name": "src",
        "cell_number_tag_name": "cell_number",
        "cell_fracs_tag_name": "cell_fracs",
        "e_bounds_tag_name": "e_bounds",
    }
    if mode in (2, 5):
        tag_names["bias_tag_name"] = "bias"
    # save the mesh into an h5m file
    filename = "sampling_mesh.h5m"
    m = tag_e_bounds(m, e_bounds)
    m.write_hdf5(filename)
    # construct Sampler
    sampler = Sampler(filename, tag_names, mode)
    # remove the temporary file
    os.remove(filename)
    # sampling and tally, tally should be defined by the mesh cell_fracs
    num_samples = 5000
    particles = []
    seed(1953)
    for i in range(num_samples):
        rands = np.array([uniform(0, 1) for x in range(6)])
        s = sampler.particle_birth(rands)
        # check w and c for each particle:
        # calculate the expected weight and cell_number
        exp_w, exp_c = _cal_exp_w_c(s, mode, cell_fracs, src_tag, bias_tag)
        assert_equal(s.w, exp_w)
        # when mode in (0, 1, 2), the set exp_c is (-1); otherwise it
        # contains several available cell numbers
        if mode in (0, 1, 2):
            assert set(s.cell_list) == exp_c
        elif mode in (3, 4, 5):
            assert set(s.cell_list).issubset(exp_c)
        # store all the particles for the convenience of distribution checks
        particles.append(s)
    # check position distribution
    # X direction follows the specified distribution
    x_dis = _get_x_dis(particles, num_ve)
    x_dis_exp = _get_x_dis_exp(mode, cell_fracs, src_tag, bias_tag)
    for i in range(len(x_dis)):
        assert abs(x_dis[i] - x_dis_exp[i]) / x_dis_exp[i] < 0.05
    # uniform in the Y and Z directions
    p_y_halfspace, p_z_halfspace = _get_p_y_z_halfspace(particles)
    assert abs(p_y_halfspace - 0.5) / 0.5 < 0.05
    assert abs(p_z_halfspace - 0.5) / 0.5 < 0.05
    # check energy distribution
    e_dis = _get_e_dis(particles, num_e_groups)
    e_dis_exp = _get_e_dis_exp(mode, cell_fracs, src_tag, bias_tag)
    for i in range(len(e_dis)):
        if e_dis_exp[i] > 0:
            assert abs(e_dis[i] - e_dis_exp[i]) / e_dis_exp[i] < 0.05
        else:
            assert_equal(e_dis[i], 0.0)
| 34.878085
| 88
| 0.562175
| 10,701
| 70,663
| 3.511261
| 0.044295
| 0.012349
| 0.006707
| 0.005962
| 0.809656
| 0.764545
| 0.740698
| 0.719008
| 0.70099
| 0.68739
| 0
| 0.054172
| 0.309809
| 70,663
| 2,025
| 89
| 34.895309
| 0.716245
| 0.225479
| 0
| 0.693125
| 0
| 0
| 0.068881
| 0
| 0
| 0
| 0
| 0
| 0.062367
| 1
| 0.021262
| false
| 0
| 0.012048
| 0.000709
| 0.041106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b7740fd874d120412af39807149d22c7247f7d97
| 203
|
py
|
Python
|
listenclosely/admin.py
|
jlmadurga/listenclosely
|
d6df9110c3ed6fd337e0236cccbe4d931bf217b0
|
[
"BSD-3-Clause"
] | 7
|
2016-01-25T15:15:54.000Z
|
2018-02-17T18:48:54.000Z
|
listenclosely/admin.py
|
jlmadurga/listenclosely
|
d6df9110c3ed6fd337e0236cccbe4d931bf217b0
|
[
"BSD-3-Clause"
] | 3
|
2016-03-11T13:22:17.000Z
|
2017-10-18T13:28:39.000Z
|
listenclosely/admin.py
|
jlmadurga/listenclosely
|
d6df9110c3ed6fd337e0236cccbe4d931bf217b0
|
[
"BSD-3-Clause"
] | 3
|
2016-12-08T17:12:35.000Z
|
2018-01-06T22:57:40.000Z
|
from django.contrib import admin
from listenclosely.models import Message, Chat, Agent, Asker
admin.site.register(Message)
admin.site.register(Chat)
admin.site.register(Agent)
admin.site.register(Asker)
| 29
| 60
| 0.82266
| 29
| 203
| 5.758621
| 0.448276
| 0.215569
| 0.407186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073892
| 203
| 7
| 61
| 29
| 0.888298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b78e3ed4e1108d302f6489c9f446a92723551a2b
| 60
|
py
|
Python
|
src/bfg/modules/__init__.py
|
rvrsh3ll/bl-bfg
|
655ada530b32d9843c36dbecef9ec682154c005a
|
[
"MIT"
] | 6
|
2022-02-16T18:37:59.000Z
|
2022-03-03T20:47:55.000Z
|
src/bfg/modules/__init__.py
|
rvrsh3ll/bl-bfg
|
655ada530b32d9843c36dbecef9ec682154c005a
|
[
"MIT"
] | null | null | null |
src/bfg/modules/__init__.py
|
rvrsh3ll/bl-bfg
|
655ada530b32d9843c36dbecef9ec682154c005a
|
[
"MIT"
] | 4
|
2022-02-16T16:50:09.000Z
|
2022-03-13T06:02:24.000Z
|
from . import http
from . import testing
#from . import smb
| 15
| 21
| 0.733333
| 9
| 60
| 4.888889
| 0.555556
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 60
| 3
| 22
| 20
| 0.916667
| 0.283333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b799a1394ad3a9c8dcd7ba1ef29c64e032cfcb44
| 156
|
py
|
Python
|
libs/msv/python/__init__.py
|
ITBE-Lab/ma
|
039e2833dd2e50df9285f183ff774bd87bbae710
|
[
"MIT"
] | 40
|
2019-04-28T21:16:45.000Z
|
2022-02-05T05:54:47.000Z
|
libs/msv/python/__init__.py
|
ITBE-Lab/ma
|
039e2833dd2e50df9285f183ff774bd87bbae710
|
[
"MIT"
] | 11
|
2019-04-28T22:29:12.000Z
|
2022-02-21T14:07:10.000Z
|
libs/msv/python/__init__.py
|
ITBE-Lab/ma
|
039e2833dd2e50df9285f183ff774bd87bbae710
|
[
"MIT"
] | 2
|
2019-05-06T15:29:23.000Z
|
2021-01-08T13:22:17.000Z
|
from ._lib_init import *
import MA
from .computeSvJumps import *
from .insertReads import *
from .sweepSvJumps import *
from .computeAccuracyRecall import *
| 26
| 36
| 0.801282
| 18
| 156
| 6.833333
| 0.5
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 156
| 6
| 36
| 26
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b7da0c96eaf29183828e76a76466c5735b4b4ed4
| 64
|
py
|
Python
|
tests/unit/models/utils/__init__.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 5
|
2020-08-26T20:12:00.000Z
|
2020-12-11T16:39:22.000Z
|
tests/unit/models/utils/__init__.py
|
RaenonX/Jelly-Bot
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 234
|
2019-12-14T03:45:19.000Z
|
2020-08-26T18:55:19.000Z
|
tests/unit/models/utils/__init__.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 2
|
2019-10-23T15:21:15.000Z
|
2020-05-22T09:35:55.000Z
|
from .validator import * # noqa
from .checker import * # noqa
| 21.333333
| 32
| 0.6875
| 8
| 64
| 5.5
| 0.625
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 64
| 2
| 33
| 32
| 0.88
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4d227ceaa09cfe9a4cbddaf4582c5b255dcef07c
| 209
|
py
|
Python
|
experiment/AIClient.py
|
adhocmaster/pyns
|
607feb56baf0900535130195163eac331e131a2e
|
[
"MIT"
] | 1
|
2021-06-15T06:21:14.000Z
|
2021-06-15T06:21:14.000Z
|
event/AIClient.py
|
adhocmaster/pyns
|
607feb56baf0900535130195163eac331e131a2e
|
[
"MIT"
] | null | null | null |
event/AIClient.py
|
adhocmaster/pyns
|
607feb56baf0900535130195163eac331e131a2e
|
[
"MIT"
] | 1
|
2021-06-15T06:21:18.000Z
|
2021-06-15T06:21:18.000Z
|
from core.TCPClient import TCPClient
from event.PacketEvent import PacketEvent
from event.EventTypes import EventTypes
from core.SenderType import SenderType
import logging
class AIClient(TCPClient):
pass
| 26.125
| 41
| 0.84689
| 26
| 209
| 6.807692
| 0.461538
| 0.090395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119617
| 209
| 8
| 42
| 26.125
| 0.961957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.714286
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4d67af64d56116da62993595b723329bee185928
| 42
|
py
|
Python
|
tensordata/paper/WACV/__init__.py
|
Hourout/tensordata
|
cbef6742ee0d3bfc4b886358fc01618bb5b63603
|
[
"Apache-2.0"
] | 13
|
2019-01-08T10:22:39.000Z
|
2020-06-17T10:02:47.000Z
|
tensordata/paper/WACV/__init__.py
|
Hourout/tensordata
|
cbef6742ee0d3bfc4b886358fc01618bb5b63603
|
[
"Apache-2.0"
] | null | null | null |
tensordata/paper/WACV/__init__.py
|
Hourout/tensordata
|
cbef6742ee0d3bfc4b886358fc01618bb5b63603
|
[
"Apache-2.0"
] | 1
|
2020-06-17T10:02:49.000Z
|
2020-06-17T10:02:49.000Z
|
from tensordata.paper.WACV._wacv import *
| 21
| 41
| 0.809524
| 6
| 42
| 5.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
128d94f18e49792a9cc0f0c06d86c9887c857967
| 34
|
py
|
Python
|
python/oops.py
|
Floozutter/silly
|
8273b4a33e2001c0a530e859c12dbc30b9590a94
|
[
"Unlicense"
] | null | null | null |
python/oops.py
|
Floozutter/silly
|
8273b4a33e2001c0a530e859c12dbc30b9590a94
|
[
"Unlicense"
] | null | null | null |
python/oops.py
|
Floozutter/silly
|
8273b4a33e2001c0a530e859c12dbc30b9590a94
|
[
"Unlicense"
] | null | null | null |
"""
oops
"""
print(0, 1 == 1, 0)
| 5.666667
| 19
| 0.382353
| 6
| 34
| 2.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 0.264706
| 34
| 5
| 20
| 6.8
| 0.36
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
12fc9abbbe6f805ac58294346c4e0f32cf3a77fb
| 204
|
py
|
Python
|
support/admin.py
|
gurupratap-matharu/django-tickets-app
|
8200af606e382f8806511c318961589f34375cdf
|
[
"MIT"
] | 1
|
2020-10-16T16:37:04.000Z
|
2020-10-16T16:37:04.000Z
|
support/admin.py
|
gurupratap-matharu/django-tickets-app
|
8200af606e382f8806511c318961589f34375cdf
|
[
"MIT"
] | null | null | null |
support/admin.py
|
gurupratap-matharu/django-tickets-app
|
8200af606e382f8806511c318961589f34375cdf
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Holiday, Vendor, Category, Ticket
admin.site.register(Holiday)
admin.site.register(Vendor)
admin.site.register(Category)
admin.site.register(Ticket)
| 22.666667
| 53
| 0.813725
| 28
| 204
| 5.928571
| 0.428571
| 0.216867
| 0.409639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 204
| 8
| 54
| 25.5
| 0.887701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4236ceac66256b2c5cf2bcd12bce120d53a13c2a
| 4,782
|
py
|
Python
|
ifx_db/tests/test_159_FetchAssocSeveralRows_01.py
|
ifxdb/PythonIfxDB
|
a9c64e8ade1329b7102f0bf356c0e4b6d230ca95
|
[
"Apache-2.0"
] | 3
|
2017-05-01T10:22:27.000Z
|
2021-12-29T11:02:34.000Z
|
ifx_db/tests/test_159_FetchAssocSeveralRows_01.py
|
ifxdb/PythonIfxDB
|
a9c64e8ade1329b7102f0bf356c0e4b6d230ca95
|
[
"Apache-2.0"
] | 1
|
2020-01-07T12:56:26.000Z
|
2020-01-07T12:56:26.000Z
|
ifx_db/tests/test_159_FetchAssocSeveralRows_01.py
|
ifxdb/PythonIfxDB
|
a9c64e8ade1329b7102f0bf356c0e4b6d230ca95
|
[
"Apache-2.0"
] | 3
|
2017-05-10T16:03:25.000Z
|
2018-03-19T14:59:41.000Z
|
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import unittest, sys
import ifx_db
import config
from testfunctions import IfxDbTestFunctions
class IfxDbTestCase(unittest.TestCase):
def test_159_FetchAssocSeveralRows_01(self):
obj = IfxDbTestFunctions()
obj.assert_expect(self.run_test_159)
def run_test_159(self):
conn = ifx_db.connect(config.ConnStr, config.user, config.password)
server = ifx_db.server_info( conn )
if (server.DBMS_NAME[0:3] == 'Inf'):
op = {ifx_db.ATTR_CASE: ifx_db.CASE_UPPER}
ifx_db.set_option(conn, op, 1)
result = ifx_db.exec_immediate(conn, "select name,job from staff")
i = 1
row = ifx_db.fetch_assoc(result)
while ( row ):
#printf("%3d %10s %10s\n",i, row['NAME'], row['JOB'])
print "%3d %10s %10s" % (i, row['NAME'], row['JOB'])
i += 1
row = ifx_db.fetch_assoc(result)
#__END__
#__LUW_EXPECTED__
# 1 Sanders Mgr
# 2 Pernal Sales
# 3 Marenghi Mgr
# 4 OBrien Sales
# 5 Hanes Mgr
# 6 Quigley Sales
# 7 Rothman Sales
# 8 James Clerk
# 9 Koonitz Sales
# 10 Plotz Mgr
# 11 Ngan Clerk
# 12 Naughton Clerk
# 13 Yamaguchi Clerk
# 14 Fraye Mgr
# 15 Williams Sales
# 16 Molinare Mgr
# 17 Kermisch Clerk
# 18 Abrahams Clerk
# 19 Sneider Clerk
# 20 Scoutten Clerk
# 21 Lu Mgr
# 22 Smith Sales
# 23 Lundquist Clerk
# 24 Daniels Mgr
# 25 Wheeler Clerk
# 26 Jones Mgr
# 27 Lea Mgr
# 28 Wilson Sales
# 29 Quill Mgr
# 30 Davis Sales
# 31 Graham Sales
# 32 Gonzales Sales
# 33 Burke Clerk
# 34 Edwards Sales
# 35 Gafney Clerk
#__ZOS_EXPECTED__
# 1 Sanders Mgr
# 2 Pernal Sales
# 3 Marenghi Mgr
# 4 OBrien Sales
# 5 Hanes Mgr
# 6 Quigley Sales
# 7 Rothman Sales
# 8 James Clerk
# 9 Koonitz Sales
# 10 Plotz Mgr
# 11 Ngan Clerk
# 12 Naughton Clerk
# 13 Yamaguchi Clerk
# 14 Fraye Mgr
# 15 Williams Sales
# 16 Molinare Mgr
# 17 Kermisch Clerk
# 18 Abrahams Clerk
# 19 Sneider Clerk
# 20 Scoutten Clerk
# 21 Lu Mgr
# 22 Smith Sales
# 23 Lundquist Clerk
# 24 Daniels Mgr
# 25 Wheeler Clerk
# 26 Jones Mgr
# 27 Lea Mgr
# 28 Wilson Sales
# 29 Quill Mgr
# 30 Davis Sales
# 31 Graham Sales
# 32 Gonzales Sales
# 33 Burke Clerk
# 34 Edwards Sales
# 35 Gafney Clerk
#__SYSTEMI_EXPECTED__
# 1 Sanders Mgr
# 2 Pernal Sales
# 3 Marenghi Mgr
# 4 OBrien Sales
# 5 Hanes Mgr
# 6 Quigley Sales
# 7 Rothman Sales
# 8 James Clerk
# 9 Koonitz Sales
# 10 Plotz Mgr
# 11 Ngan Clerk
# 12 Naughton Clerk
# 13 Yamaguchi Clerk
# 14 Fraye Mgr
# 15 Williams Sales
# 16 Molinare Mgr
# 17 Kermisch Clerk
# 18 Abrahams Clerk
# 19 Sneider Clerk
# 20 Scoutten Clerk
# 21 Lu Mgr
# 22 Smith Sales
# 23 Lundquist Clerk
# 24 Daniels Mgr
# 25 Wheeler Clerk
# 26 Jones Mgr
# 27 Lea Mgr
# 28 Wilson Sales
# 29 Quill Mgr
# 30 Davis Sales
# 31 Graham Sales
# 32 Gonzales Sales
# 33 Burke Clerk
# 34 Edwards Sales
# 35 Gafney Clerk
#__IDS_EXPECTED__
# 1 Sanders Mgr
# 2 Pernal Sales
# 3 Marenghi Mgr
# 4 OBrien Sales
# 5 Hanes Mgr
# 6 Quigley Sales
# 7 Rothman Sales
# 8 James Clerk
# 9 Koonitz Sales
# 10 Plotz Mgr
# 11 Ngan Clerk
# 12 Naughton Clerk
# 13 Yamaguchi Clerk
# 14 Fraye Mgr
# 15 Williams Sales
# 16 Molinare Mgr
# 17 Kermisch Clerk
# 18 Abrahams Clerk
# 19 Sneider Clerk
# 20 Scoutten Clerk
# 21 Lu Mgr
# 22 Smith Sales
# 23 Lundquist Clerk
# 24 Daniels Mgr
# 25 Wheeler Clerk
# 26 Jones Mgr
# 27 Lea Mgr
# 28 Wilson Sales
# 29 Quill Mgr
# 30 Davis Sales
# 31 Graham Sales
# 32 Gonzales Sales
# 33 Burke Clerk
# 34 Edwards Sales
# 35 Gafney Clerk
| 26.566667
| 71
| 0.508783
| 563
| 4,782
| 4.234458
| 0.257549
| 0.018876
| 0.026846
| 0.031879
| 0.778523
| 0.766779
| 0.766779
| 0.766779
| 0.744966
| 0.744966
| 0
| 0.102925
| 0.435174
| 4,782
| 179
| 72
| 26.715084
| 0.779711
| 0.773317
| 0
| 0.095238
| 0
| 0
| 0.052688
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0
| null | null | 0.047619
| 0.190476
| null | null | 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
423966a1f3ad6f82b1548613963497a3a9697e4e
| 6,263
|
py
|
Python
|
sdk/python/pulumi_aws/directoryservice/_inputs.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/directoryservice/_inputs.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/directoryservice/_inputs.py
|
mdop-wh/pulumi-aws
|
05bb32e9d694dde1c3b76d440fd2cd0344d23376
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'DirectoryConnectSettingsArgs',
'DirectoryVpcSettingsArgs',
]
@pulumi.input_type
class DirectoryConnectSettingsArgs:
def __init__(__self__, *,
customer_dns_ips: pulumi.Input[List[pulumi.Input[str]]],
customer_username: pulumi.Input[str],
subnet_ids: pulumi.Input[List[pulumi.Input[str]]],
vpc_id: pulumi.Input[str],
availability_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None,
connect_ips: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[List[pulumi.Input[str]]] customer_dns_ips: The DNS IP addresses of the domain to connect to.
:param pulumi.Input[str] customer_username: The username corresponding to the password provided.
:param pulumi.Input[List[pulumi.Input[str]]] subnet_ids: The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs).
:param pulumi.Input[str] vpc_id: The identifier of the VPC that the directory is in.
:param pulumi.Input[List[pulumi.Input[str]]] connect_ips: The IP addresses of the AD Connector servers.
"""
pulumi.set(__self__, "customer_dns_ips", customer_dns_ips)
pulumi.set(__self__, "customer_username", customer_username)
pulumi.set(__self__, "subnet_ids", subnet_ids)
pulumi.set(__self__, "vpc_id", vpc_id)
if availability_zones is not None:
pulumi.set(__self__, "availability_zones", availability_zones)
if connect_ips is not None:
pulumi.set(__self__, "connect_ips", connect_ips)
@property
@pulumi.getter(name="customerDnsIps")
def customer_dns_ips(self) -> pulumi.Input[List[pulumi.Input[str]]]:
"""
The DNS IP addresses of the domain to connect to.
"""
return pulumi.get(self, "customer_dns_ips")
@customer_dns_ips.setter
def customer_dns_ips(self, value: pulumi.Input[List[pulumi.Input[str]]]):
pulumi.set(self, "customer_dns_ips", value)
@property
@pulumi.getter(name="customerUsername")
def customer_username(self) -> pulumi.Input[str]:
"""
The username corresponding to the password provided.
"""
return pulumi.get(self, "customer_username")
@customer_username.setter
def customer_username(self, value: pulumi.Input[str]):
pulumi.set(self, "customer_username", value)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> pulumi.Input[List[pulumi.Input[str]]]:
"""
The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs).
"""
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: pulumi.Input[List[pulumi.Input[str]]]):
pulumi.set(self, "subnet_ids", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
The identifier of the VPC that the directory is in.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> Optional[pulumi.Input[List[pulumi.Input[str]]]]:
return pulumi.get(self, "availability_zones")
@availability_zones.setter
def availability_zones(self, value: Optional[pulumi.Input[List[pulumi.Input[str]]]]):
pulumi.set(self, "availability_zones", value)
@property
@pulumi.getter(name="connectIps")
def connect_ips(self) -> Optional[pulumi.Input[List[pulumi.Input[str]]]]:
"""
The IP addresses of the AD Connector servers.
"""
return pulumi.get(self, "connect_ips")
@connect_ips.setter
def connect_ips(self, value: Optional[pulumi.Input[List[pulumi.Input[str]]]]):
pulumi.set(self, "connect_ips", value)
@pulumi.input_type
class DirectoryVpcSettingsArgs:
def __init__(__self__, *,
subnet_ids: pulumi.Input[List[pulumi.Input[str]]],
vpc_id: pulumi.Input[str],
availability_zones: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[List[pulumi.Input[str]]] subnet_ids: The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs).
:param pulumi.Input[str] vpc_id: The identifier of the VPC that the directory is in.
"""
pulumi.set(__self__, "subnet_ids", subnet_ids)
pulumi.set(__self__, "vpc_id", vpc_id)
if availability_zones is not None:
pulumi.set(__self__, "availability_zones", availability_zones)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> pulumi.Input[List[pulumi.Input[str]]]:
"""
The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs).
"""
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: pulumi.Input[List[pulumi.Input[str]]]):
pulumi.set(self, "subnet_ids", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
The identifier of the VPC that the directory is in.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> Optional[pulumi.Input[List[pulumi.Input[str]]]]:
return pulumi.get(self, "availability_zones")
@availability_zones.setter
def availability_zones(self, value: Optional[pulumi.Input[List[pulumi.Input[str]]]]):
pulumi.set(self, "availability_zones", value)
| 39.14375
| 153
| 0.660706
| 790
| 6,263
| 5.046835
| 0.127848
| 0.16002
| 0.119388
| 0.115877
| 0.839227
| 0.755204
| 0.745924
| 0.683471
| 0.637321
| 0.637321
| 0
| 0.001837
| 0.217627
| 6,263
| 159
| 154
| 39.389937
| 0.811837
| 0.22114
| 0
| 0.616162
| 1
| 0
| 0.105923
| 0.011241
| 0
| 0
| 0
| 0
| 0
| 1
| 0.20202
| false
| 0
| 0.050505
| 0.020202
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
424f648d196dac7e2486685ff9c4e899613672c5
| 108,792
|
py
|
Python
|
tests/samsung_multiroom/api/test_api.py
|
kusma/samsung_multiroom
|
09ca86d27b87a4aa0c97ec2accbd4ec67dd0cc61
|
[
"MIT"
] | 6
|
2019-04-05T19:10:39.000Z
|
2021-11-23T17:26:49.000Z
|
tests/samsung_multiroom/api/test_api.py
|
kusma/samsung_multiroom
|
09ca86d27b87a4aa0c97ec2accbd4ec67dd0cc61
|
[
"MIT"
] | 3
|
2020-09-25T06:58:00.000Z
|
2021-12-13T19:57:50.000Z
|
tests/samsung_multiroom/api/test_api.py
|
kusma/samsung_multiroom
|
09ca86d27b87a4aa0c97ec2accbd4ec67dd0cc61
|
[
"MIT"
] | 4
|
2019-04-05T18:58:11.000Z
|
2021-07-22T19:54:56.000Z
|
import re
import unittest
from unittest.mock import MagicMock
import httpretty
import requests
import xmltodict
from samsung_multiroom.api import COMMAND_CPM
from samsung_multiroom.api import COMMAND_UIC
from samsung_multiroom.api import METHOD_GET
from samsung_multiroom.api import SamsungMultiroomApi
from samsung_multiroom.api import SamsungMultiroomApiException
from samsung_multiroom.api import paginator
def _get_api():
    """Return an API client pointed at the fixture speaker address used by every test."""
    speaker_ip, speaker_port = '192.168.1.129', 55001
    return SamsungMultiroomApi('public', speaker_ip, speaker_port)
class TestApi(unittest.TestCase):
def test_invalid_method_raises_exception(self):
api = _get_api()
self.assertRaises(ValueError, api.request, 'post', COMMAND_CPM, '<name>GetSpkName</name>')
def test_invalid_command_raises_exception(self):
api = _get_api()
self.assertRaises(ValueError, api.request, METHOD_GET, 'INVALIDCOMMAND', '<name>GetSpkName</name>')
@httpretty.activate(allow_net_connect=False)
def test_request_timeout_raises_exception(self):
def exception_response():
raise requests.exceptions.TimeoutException()
httpretty.register_uri(
httpretty.GET,
re.compile(r'http://192.168.1.129:55001/.*'),
body=exception_response
)
api = _get_api()
self.assertRaises(SamsungMultiroomApiException, api.request, METHOD_GET, COMMAND_UIC, '<name>GetSpkName</name>')
    @httpretty.activate(allow_net_connect=False)
    def test_request_bad_result_raises_exception(self):
        """A response with result="ng" makes request() raise SamsungMultiroomApiException."""
        httpretty.register_uri(
            httpretty.GET,
            re.compile(r'http://192.168.1.129:55001/.*'),
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>SpkName</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ng"></response>
</UIC>"""
        )
        api = _get_api()
        self.assertRaises(SamsungMultiroomApiException, api.request, METHOD_GET, COMMAND_UIC, '<name>GetSpkName</name>')
    @httpretty.activate(allow_net_connect=False)
    def test_request_returns_valid_response(self):
        """request() returns the <response> payload as a dict (CDATA unwrapped)."""
        httpretty.register_uri(
            httpretty.GET,
            re.compile(r'http://192.168.1.129:55001/.*'),
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>SpkName</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<spkname><![CDATA[Living Room]]></spkname>
</response>
</UIC>"""
        )
        api = _get_api()
        response = api.request(METHOD_GET, COMMAND_UIC, '<name>GetSpkName</name>')
        self.assertEqual(response, {
            'spkname': 'Living Room'
        })
    @httpretty.activate(allow_net_connect=False)
    def test_get_speaker_name(self):
        """get_speaker_name() issues GetSpkName and returns the CDATA speaker name."""
        httpretty.register_uri(
            httpretty.GET,
            # URL-encoded '<name>GetSpkName</name>' query; match_querystring pins the exact command.
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetSpkName%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>SpkName</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<spkname><![CDATA[Living Room]]></spkname>
</response>
</UIC>"""
        )
        api = _get_api()
        speaker_name = api.get_speaker_name()
        self.assertEqual(speaker_name, 'Living Room')
@httpretty.activate(allow_net_connect=False)
def test_set_speaker_name(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetSpkName%3C/name%3E%3Cp%20type=%22cdata%22%20name=%22spkname%22%20val=%22empty%22%3E%3C![CDATA[Living%20Room]]%3E%3C/p%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>SpkName</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<spkname><![CDATA[Living Room]]></spkname>
</response>
</UIC>"""
)
api = _get_api()
speaker_name = api.set_speaker_name('Living Room')
    @unittest.mock.patch('socket.socket')
    def test_get_main_info(self, s):
        """get_main_info() parses a MainInfo response read over a raw socket.

        This test mocks ``socket.socket`` rather than httpretty, so presumably
        the api implements this call with a raw socket — TODO confirm against
        the api module. recv() yields two canned HTTP responses, then b'' (EOF).
        """
        s.return_value.recv.side_effect = [
            b"""HTTP/1.1 200 OK
Date: Fri, 02 Jan 1970 10:53:13 GMT
Server: Samsung/1.0
Content-Type: text/html
Content-Length: 215
Connection: close
Last-Modified: Fri, 02 Jan 1970 10:53:13 GMT
<?xml version="1.0" encoding="UTF-8"?><UIC><method>RequestDeviceInfo</method><version>1.0</version><speakerip>192.168.1.129</speakerip><user_identifier>public</user_identifier><response result="ok"></response></UIC>""",
            b"""HTTP/1.1 200 OK
Date: Fri, 02 Jan 1970 10:53:13 GMT
Server: Samsung/1.0
Content-Type: text/html
Content-Length: 678
Connection: close
Last-Modified: Fri, 02 Jan 1970 10:53:13 GMT
<?xml version="1.0" encoding="UTF-8"?><UIC><method>MainInfo</method><version>1.0</version><speakerip>192.168.1.129</speakerip><user_identifier></user_identifier><response result="ok"><party>off</party><partymain></partymain><grouptype>N</grouptype><groupmainip>0.0.0.0</groupmainip><groupmainmacaddr>00:00:00:00:00:00</groupmainmacaddr><spkmacaddr>xx:xx:xx:xx:xx:xx</spkmacaddr><spkmodelname>HW-K650</spkmodelname><groupmode>none</groupmode><channeltype>front</channeltype><channelvolume>0</channelvolume><multichinfo>on</multichinfo><groupspknum>1</groupspknum><dfsstatus>dfsoff</dfsstatus><protocolver>2.3</protocolver><btmacaddr>yy:yy:yy:yy:yy:yy</btmacaddr></response></UIC>""",
            b'',
        ]
        api = _get_api()
        main_info = api.get_main_info()
        # Empty elements map to None; everything else stays a string.
        self.assertEqual(main_info, {
            'party': 'off',
            'partymain': None,
            'grouptype': 'N',
            'groupmainip': '0.0.0.0',
            'groupmainmacaddr': '00:00:00:00:00:00',
            'spkmacaddr': 'xx:xx:xx:xx:xx:xx',
            'spkmodelname': 'HW-K650',
            'groupmode': 'none',
            'channeltype': 'front',
            'channelvolume': '0',
            'multichinfo': 'on',
            'groupspknum': '1',
            'dfsstatus': 'dfsoff',
            'protocolver': '2.3',
            'btmacaddr': 'yy:yy:yy:yy:yy:yy',
        })
    @httpretty.activate(allow_net_connect=False)
    def test_get_volume(self):
        """get_volume() converts the <volume> text to the int 10."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetVolume%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>VolumeLevel</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<volume>10</volume>
</response>
</UIC>"""
        )
        api = _get_api()
        volume = api.get_volume()
        self.assertEqual(volume, 10)
    @httpretty.activate(allow_net_connect=False)
    def test_set_volume(self):
        """set_volume(10) issues SetVolume with a dec 'volume' parameter of 10."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetVolume%3C/name%3E%3Cp%20type=%22dec%22%20name=%22volume%22%20val=%2210%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>VolumeLevel</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<volume>10</volume>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_volume(10)
    @httpretty.activate(allow_net_connect=False)
    def test_get_mute(self):
        """get_mute() maps <mute>off</mute> to boolean False."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetMute%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>MuteStatus</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<mute>off</mute>
</response>
</UIC>"""
        )
        api = _get_api()
        mute = api.get_mute()
        self.assertEqual(mute, False)
    @httpretty.activate(allow_net_connect=False)
    def test_set_mute(self):
        """set_mute(True) issues SetMute with a str 'mute' parameter of 'on'."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetMute%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22mute%22%20val%3D%22on%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>MuteStatus</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<mute>on</mute>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_mute(True)
    @httpretty.activate(allow_net_connect=False)
    def test_get_func(self):
        """get_func() returns the current source as a dict; empty elements become None."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetFunc%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>CurrentFunc</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<function>wifi</function>
<submode>dlna</submode>
<connection></connection>
<devicename><![CDATA[]]></devicename>
</response>
</UIC>"""
        )
        api = _get_api()
        func = api.get_func()
        # Both the empty element and the empty CDATA come back as None.
        self.assertEqual(func, {
            'function': 'wifi',
            'submode': 'dlna',
            'connection': None,
            'devicename': None,
        })
    @httpretty.activate(allow_net_connect=False)
    def test_set_func(self):
        """set_func('bt') issues SetFunc with a str 'function' parameter of 'bt'."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetFunc%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22function%22%20val%3D%22bt%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>PlayStatus</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<function>bt</function>
<playstatus>pause</playstatus>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_func('bt')
    @httpretty.activate(allow_net_connect=False)
    def test_get_shuffle_mode(self):
        """get_shuffle_mode() maps <shuffle>on</shuffle> to boolean True."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetShuffleMode%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>ShuffleMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<shuffle>on</shuffle>
</response>
</UIC>"""
        )
        api = _get_api()
        shuffle_mode = api.get_shuffle_mode()
        self.assertEqual(shuffle_mode, True)
    @httpretty.activate(allow_net_connect=False)
    def test_set_shuffle_mode(self):
        """set_shuffle_mode(True) issues SetShuffleMode with 'shufflemode' = 'on'."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetShuffleMode%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22shufflemode%22%20val%3D%22on%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>ShuffleMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<shuffle>on</shuffle>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_shuffle_mode(True)
    @httpretty.activate(allow_net_connect=False)
    def test_set_trick_mode(self):
        """set_trick_mode('next') issues SetTrickMode with 'trickmode' = 'next'."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetTrickMode%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22trickmode%22%20val%3D%22next%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>CurrentFunc</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<function>wifi</function>
<submode>dlna</submode>
<connection></connection>
<devicename><![CDATA[]]></devicename>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_trick_mode('next')
    @httpretty.activate(allow_net_connect=False)
    def test_set_playback_control(self):
        """set_playback_control('pause') issues SetPlaybackControl with 'pause'."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetPlaybackControl%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22playbackcontrol%22%20val%3D%22pause%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>PlaybackStatus</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<playstatus>pause</playstatus>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_playback_control('pause')
    @httpretty.activate(allow_net_connect=False)
    def test_get_music_info(self):
        """get_music_info() returns current track metadata; CDATA fields are unwrapped."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetMusicInfo%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>MusicInfo</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
<playertype>allshare</playertype>
<playbacktype>folder</playbacktype>
<sourcename><![CDATA[]]></sourcename>
<parentid>22$30224</parentid>
<parentid2></parentid2>
<playindex>8</playindex>
<objectid><![CDATA[22$@52947]]></objectid>
<title><![CDATA[New star in the sky]]></title>
<artist><![CDATA[Air]]></artist>
<album><![CDATA[Moon Safari]]></album>
<thumbnail><![CDATA[http://192.168.1.111:50002/transcoder/jpegtnscaler.cgi/folderart/52947.jpg]]></thumbnail>
<timelength>0:05:40.000</timelength>
<playtime>325067</playtime>
<seek>enable</seek>
<pause>enable</pause>
</response>
</UIC>"""
        )
        api = _get_api()
        music_info = api.get_music_info()
        # Only spot-check the fields a caller would consume; all remain strings.
        self.assertEqual(music_info['title'], 'New star in the sky')
        self.assertEqual(music_info['artist'], 'Air')
        self.assertEqual(music_info['album'], 'Moon Safari')
        self.assertEqual(music_info['thumbnail'], 'http://192.168.1.111:50002/transcoder/jpegtnscaler.cgi/folderart/52947.jpg')
        self.assertEqual(music_info['timelength'], '0:05:40.000')
        self.assertEqual(music_info['playtime'], '325067')
        self.assertEqual(music_info['seek'], 'enable')
        self.assertEqual(music_info['pause'], 'enable')
    @httpretty.activate(allow_net_connect=False)
    def test_get_play_status(self):
        """get_play_status() returns function/submode/playstatus as a dict of strings."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetPlayStatus%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>PlayStatus</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier></user_identifier>
<response result="ok">
<function>wifi</function>
<submode>dlna</submode>
<playstatus>play</playstatus>
</response>
</UIC>"""
        )
        api = _get_api()
        func = api.get_play_status()
        self.assertEqual(func, {
            'function': 'wifi',
            'submode': 'dlna',
            'playstatus': 'play',
        })
    @httpretty.activate(allow_net_connect=False)
    def test_set_search_time(self):
        """set_search_time(50) issues SetSearchTime with a dec 'playtime' of 50."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetSearchTime%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22playtime%22%20val%3D%2250%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>MusicPlayTime</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<timelength>431</timelength>
<playtime>50</playtime>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_search_time(50)
    @httpretty.activate(allow_net_connect=False)
    def test_get_preset_list(self):
        """get_preset_list(0, 10) hits the CPM endpoint and returns <preset> entries as dicts."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetPresetList%3C%2Fname%3E%3Cp%20type=%22dec%22%20name=%22startindex%22%20val=%220%22/%3E%3Cp%20type=%22dec%22%20name=%22listcount%22%20val=%2210%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>PresetList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<totallistcount>6</totallistcount>
<startindex>0</startindex>
<listcount>6</listcount>
<timestamp>2018-12-28T17:44:10Z</timestamp>
<presetlisttype>0</presetlisttype>
<presetlist>
<preset>
<kind>speaker</kind>
<title>Radio Swiss Jazz (Jazz Music)</title>
<description>Manu Dibango - Milady's Song</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s6814t.png</thumbnail>
<contentid>0</contentid>
<mediaid>s6814</mediaid>
</preset>
<preset>
<kind>speaker</kind>
<title>93.5 | BBC Radio 4 (US News)</title>
<description>Intelligent speech</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s25419t.png</thumbnail>
<contentid>1</contentid>
<mediaid>s25419</mediaid>
</preset>
<preset>
<kind>speaker</kind>
<title>89.1 | BBC Radio 2 (Adult Hits)</title>
<description>Amazing music. Played by an amazing line up.</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s24940t.png</thumbnail>
<contentid>2</contentid>
<mediaid>s24940</mediaid>
</preset>
<preset>
<kind>my</kind>
<title>Radio Swiss Jazz (Jazz Music)</title>
<description>Groovin' J 5 - This Here</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s6814t.png</thumbnail>
<contentid>3</contentid>
<mediaid>s6814</mediaid>
</preset>
<preset>
<kind>my</kind>
<title>91.3 | BBC Radio 3 (Classical Music)</title>
<description>Live music and arts</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s24941t.png</thumbnail>
<contentid>4</contentid>
<mediaid>s24941</mediaid>
</preset>
<preset>
<kind>my</kind>
<title>93.5 | BBC Radio 4 (US News)</title>
<description>Intelligent speech</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s25419t.png</thumbnail>
<contentid>5</contentid>
<mediaid>s25419</mediaid>
</preset>
</presetlist>
</response>
</CPM>"""
        )
        api = _get_api()
        preset_list = api.get_preset_list(0, 10)
        # All 6 presets come back; fields of the first one are plain strings.
        self.assertEqual(len(preset_list), 6)
        self.assertEqual(preset_list[0]['kind'], 'speaker')
        self.assertEqual(preset_list[0]['title'], 'Radio Swiss Jazz (Jazz Music)')
        self.assertEqual(preset_list[0]['description'], 'Manu Dibango - Milady\'s Song')
        self.assertEqual(preset_list[0]['thumbnail'], 'http://cdn-radiotime-logos.tunein.com/s6814t.png')
        self.assertEqual(preset_list[0]['contentid'], '0')
        self.assertEqual(preset_list[0]['mediaid'], 's6814')
    @httpretty.activate(allow_net_connect=False)
    def test_get_radio_info(self):
        """get_radio_info() returns the full RadioInfo payload as a flat dict of strings."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetRadioInfo%3C%2Fname%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioInfo</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<root>Favorites</root>
<presetindex>0</presetindex>
<title>Radio Swiss Jazz (Jazz Music)</title>
<description>Manu Dibango - Milady's Song</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s6814d.png</thumbnail>
<mediaid>s6814</mediaid>
<allowfeedback>0</allowfeedback>
<timestamp>2018-12-28T18:07:07Z</timestamp>
<no_queue>1</no_queue>
<playstatus>play</playstatus>
</response>
</CPM>"""
        )
        api = _get_api()
        func = api.get_radio_info()
        self.assertEqual(func, {
            'cpname': 'TuneIn',
            'root': 'Favorites',
            'presetindex': '0',
            'title': 'Radio Swiss Jazz (Jazz Music)',
            'description': 'Manu Dibango - Milady\'s Song',
            'thumbnail': 'http://cdn-radiotime-logos.tunein.com/s6814d.png',
            'mediaid': 's6814',
            'allowfeedback': '0',
            'timestamp': '2018-12-28T18:07:07Z',
            'no_queue': '1',
            'playstatus': 'play',
        })
    @httpretty.activate(allow_net_connect=False)
    def test_set_play_preset(self):
        """set_play_preset(1, 0) issues SetPlayPreset with presettype=1, presetindex=0."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetPlayPreset%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22presettype%22%20val%3D%221%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22presetindex%22%20val%3D%220%22/%3E',
            match_querystring=True,
            # NOTE(review): the CPM endpoint replies with a UIC-rooted document here;
            # presumably that mirrors the real device — confirm before changing.
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>StopPlaybackEvent</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<playtime>0</playtime>
</response>
</UIC>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_play_preset(1, 0)
    @httpretty.activate(allow_net_connect=False)
    def test_set_select_radio(self):
        """set_select_radio() issues the parameterless SetSelectRadio CPM command."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetSelectRadio%3C/name%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioSelected</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<signinstatus>0</signinstatus>
<timestamp>2018-12-28T18:35:17Z</timestamp>
<audioinfo>
<title>Radio Swiss Jazz (Jazz Music)</title>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s6814d.png</thumbnail>
<playstatus>play</playstatus>
</audioinfo>
</response>
</CPM>"""
        )
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_select_radio()
    @httpretty.activate(allow_net_connect=False)
    def test_get_dms_list(self):
        """get_dms_list(0, 20) returns DLNA media servers; XML attributes keep an '@' prefix."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetDmsList%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22liststartindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2220%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>DmsList</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<listtotalcount>1</listtotalcount>
<liststartindex>0</liststartindex>
<listcount>1</listcount>
<dmslist>
<dms device_id="0">
<dmsid>uuid:00113249-398f-0011-8f39-8f3949321100</dmsid>
<dmsname><![CDATA[nas]]></dmsname>
<devicetype>network</devicetype>
<thumbnail_PNG_LRG><![CDATA[http://192.168.1.111:50001/tmp_icon/dmsicon120.png]]></thumbnail_PNG_LRG>
<thumbnail_JPG_LRG><![CDATA[http://192.168.1.111:50001/tmp_icon/dmsicon120.jpg]]></thumbnail_JPG_LRG>
<thumbnail_PNG_SM><![CDATA[http://192.168.1.111:50001/tmp_icon/dmsicon48.png]]></thumbnail_PNG_SM>
<thumbnail_JPG_SM><![CDATA[http://192.168.1.111:50001/tmp_icon/dmsicon48.jpg]]></thumbnail_JPG_SM>
</dms>
</dmslist>
</response>
</UIC>"""
        )
        api = _get_api()
        dms_list = api.get_dms_list(0, 20)
        self.assertEqual(len(dms_list), 1)
        # '@device_id' is the xmltodict-style rendering of the device_id attribute.
        self.assertEqual(dms_list[0], {
            '@device_id': '0',
            'dmsid': 'uuid:00113249-398f-0011-8f39-8f3949321100',
            'dmsname': 'nas',
            'devicetype': 'network',
            'thumbnail_PNG_LRG': 'http://192.168.1.111:50001/tmp_icon/dmsicon120.png',
            'thumbnail_JPG_LRG': 'http://192.168.1.111:50001/tmp_icon/dmsicon120.jpg',
            'thumbnail_PNG_SM': 'http://192.168.1.111:50001/tmp_icon/dmsicon48.png',
            'thumbnail_JPG_SM': 'http://192.168.1.111:50001/tmp_icon/dmsicon48.jpg',
        })
    @httpretty.activate(allow_net_connect=False)
    def test_pc_get_music_list_by_category(self):
        """pc_get_music_list_by_category() lists top-level DMS folders; empty tags map to None."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EPCGetMusicListByCategory%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22device_udn%22%20val%3D%22uuid%3A00113249-398f-0011-8f39-8f3949321100%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22filter%22%20val%3D%22folder%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22categoryid%22%20val%3D%22folder%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22liststartindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2220%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>PCMusicList</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<listtotalcount>3</listtotalcount>
<liststartindex>0</liststartindex>
<listcount>3</listcount>
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
<filter>folder</filter>
<playertype>myphone</playertype>
<playbacktype>playlist</playbacktype>
<sourcename><![CDATA[nas]]></sourcename>
<parentid>0</parentid>
<parentid2 />
<musiclist>
<music object_id="21">
<type>CONTAINER</type>
<playindex>-1</playindex>
<name />
<title><![CDATA[Music]]></title>
<artist />
<album />
<thumbnail />
<timelength />
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
</music>
<music object_id="37">
<type>CONTAINER</type>
<playindex>-1</playindex>
<name />
<title><![CDATA[Photo]]></title>
<artist />
<album />
<thumbnail />
<timelength />
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
</music>
<music object_id="44">
<type>CONTAINER</type>
<playindex>-1</playindex>
<name />
<title><![CDATA[Video]]></title>
<artist />
<album />
<thumbnail />
<timelength />
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
</music>
</musiclist>
</response>
</UIC>"""
        )
        api = _get_api()
        music_list = api.pc_get_music_list_by_category('uuid:00113249-398f-0011-8f39-8f3949321100', 0, 20)
        self.assertEqual(len(music_list), 3)
        # Self-closed elements (<name />, <artist /> ...) come back as None.
        self.assertEqual(music_list[0], {
            '@object_id': '21',
            'type': 'CONTAINER',
            'playindex': '-1',
            'name': None,
            'title': 'Music',
            'artist': None,
            'album': None,
            'thumbnail': None,
            'timelength': None,
            'device_udn': 'uuid:00113249-398f-0011-8f39-8f3949321100',
        })
    @httpretty.activate(allow_net_connect=False)
    def test_pc_get_music_list_by_id(self):
        """pc_get_music_list_by_id() lists tracks under a parent id ('22$30224' URL-encoded)."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EPCGetMusicListByID%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22device_udn%22%20val%3D%22uuid%3A00113249-398f-0011-8f39-8f3949321100%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22filter%22%20val%3D%22folder%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22parentid%22%20val%3D%2222%2430224%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22liststartindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2220%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>PCMusicList</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<listtotalcount>2</listtotalcount>
<liststartindex>0</liststartindex>
<listcount>2</listcount>
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
<filter>folder</filter>
<playertype>myphone</playertype>
<playbacktype>playlist</playbacktype>
<sourcename><![CDATA[nas]]></sourcename>
<parentid>22$30224</parentid>
<parentid2 />
<musiclist>
<music object_id="22$@52941">
<type>AUDIO</type>
<playindex>0</playindex>
<name><![CDATA[La femme d'argent.mp3]]></name>
<title><![CDATA[La femme d'argent]]></title>
<artist><![CDATA[Air]]></artist>
<album><![CDATA[Moon Safari]]></album>
<thumbnail><![CDATA[http://192.168.1.111:50002/transcoder/jpegtnscaler.cgi/folderart/52941.jpg]]></thumbnail>
<timelength>0:07:11.000</timelength>
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
</music>
<music object_id="22$@52942">
<type>AUDIO</type>
<playindex>1</playindex>
<name><![CDATA[Sexy boy.mp3]]></name>
<title><![CDATA[Sexy boy]]></title>
<artist><![CDATA[Air]]></artist>
<album><![CDATA[Moon Safari]]></album>
<thumbnail><![CDATA[http://192.168.1.111:50002/transcoder/jpegtnscaler.cgi/folderart/52942.jpg]]></thumbnail>
<timelength>0:04:58.000</timelength>
<device_udn>uuid:00113249-398f-0011-8f39-8f3949321100</device_udn>
</music>
</musiclist>
</response>
</UIC>"""
        )
        api = _get_api()
        music_list = api.pc_get_music_list_by_id('uuid:00113249-398f-0011-8f39-8f3949321100', '22$30224', 0, 20)
        self.assertEqual(len(music_list), 2)
        self.assertEqual(music_list[0], {
            '@object_id': '22$@52941',
            'type': 'AUDIO',
            'playindex': '0',
            'name': 'La femme d\'argent.mp3',
            'title': 'La femme d\'argent',
            'artist': 'Air',
            'album': 'Moon Safari',
            'thumbnail': 'http://192.168.1.111:50002/transcoder/jpegtnscaler.cgi/folderart/52941.jpg',
            'timelength': '0:07:11.000',
            'device_udn': 'uuid:00113249-398f-0011-8f39-8f3949321100',
        })
    @httpretty.activate(allow_net_connect=False)
    def test_set_playlist_playback_control(self):
        """set_playlist_playback_control() serialises the item list into one long
        SetPlaylistPlaybackControl command; the registered URL pins the exact encoding
        (playindex/playtime 0, totalobjectcount 1, device_udn/objectid/title/thumbnail/artist
        from the single item below)."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetPlaylistPlaybackControl%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22playbackcontrol%22%20val%3D%22play%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22playertype%22%20val%3D%22allshare%22/%3E%3Cp%20type%3D%22cdata%22%20name%3D%22sourcename%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5B%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22dec%22%20name%3D%22playindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22playtime%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22totalobjectcount%22%20val%3D%221%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22device_udn%22%20val%3D%22uuid%3A00113249-398f-0011-8f39-8f3949321100%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22objectid%22%20val%3D%2222%24%4052942%22/%3E%3Cp%20type%3D%22cdata%22%20name%3D%22songtitle%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5BSexy%20boy%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22cdata%22%20name%3D%22thumbnail%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5Bhttp%3A//192.168.1.111%3A50002/transcoder/jpegtnscaler.cgi/folderart/52941.jpg%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22cdata%22%20name%3D%22artist%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5BAir%5D%5D%3E%3C/p%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>StopPlaybackEvent</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<playtime>0</playtime>
</response>
</UIC>"""
        )
        # Single playlist entry; its fields must round-trip into the command URL above.
        items = [
            {
                'device_udn': 'uuid:00113249-398f-0011-8f39-8f3949321100',
                'object_id': '22$@52942',
                'title': 'Sexy boy',
                'thumbnail': 'http://192.168.1.111:50002/transcoder/jpegtnscaler.cgi/folderart/52941.jpg',
                'artist': 'Air',
            }
        ]
        api = _get_api()
        # No explicit assertion: match_querystring + allow_net_connect=False verify the URL.
        api.set_playlist_playback_control(items)
    @httpretty.activate(allow_net_connect=False)
    def test_browse_main(self):
        """browse_main(0, 30) returns top-level TuneIn menu items as dicts."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EBrowseMain%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22startindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2230%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<root>Browse</root>
<browsemode>0</browsemode>
<category isroot="1">Browse</category>
<totallistcount>4</totallistcount>
<startindex>0</startindex>
<listcount>4</listcount>
<timestamp>2018-12-31T16:06:37Z</timestamp>
<menulist>
<menuitem type="0">
<title>Favorites</title>
<contentid>0</contentid>
</menuitem>
<menuitem type="0">
<title>Local Radio</title>
<contentid>1</contentid>
</menuitem>
<menuitem type="0">
<title>Recents</title>
<contentid>2</contentid>
</menuitem>
<menuitem type="0">
<title>Trending</title>
<contentid>3</contentid>
</menuitem>
</menulist>
</response>
</CPM>"""
        )
        api = _get_api()
        items = api.browse_main(0, 30)
        self.assertEqual(len(items), 4)
        # '@type' carries the menuitem's XML "type" attribute.
        self.assertEqual(items[0], {
            '@type': '0',
            'title': 'Favorites',
            'contentid': '0',
        })
    @httpretty.activate(allow_net_connect=False)
    def test_get_select_radio_list_with_folders(self):
        """get_select_radio_list() with a folder category returns type="0" menu entries."""
        httpretty.register_uri(
            httpretty.GET,
            'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetSelectRadioList%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22contentid%22%20val%3D%2210%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22startindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2230%22/%3E',
            match_querystring=True,
            body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<root>Browse</root>
<browsemode>0</browsemode>
<category isroot="0">By Language</category>
<totallistcount>4</totallistcount>
<startindex>0</startindex>
<listcount>4</listcount>
<timestamp>2018-12-31T16:23:16Z</timestamp>
<menulist>
<menuitem type="0">
<title>Aboriginal</title>
<contentid>0</contentid>
</menuitem>
<menuitem type="0">
<title>Afrikaans</title>
<contentid>1</contentid>
</menuitem>
<menuitem type="0">
<title>Akan</title>
<contentid>2</contentid>
</menuitem>
<menuitem type="0">
<title>Albanian</title>
<contentid>3</contentid>
</menuitem>
</menulist>
</response>
</CPM>"""
        )
        api = _get_api()
        items = api.get_select_radio_list(10, 0, 30)
        self.assertEqual(len(items), 4)
        self.assertEqual(items[0], {
            '@type': '0',
            'title': 'Aboriginal',
            'contentid': '0',
        })
@httpretty.activate(allow_net_connect=False)
def test_get_select_radio_list_with_radios(self):
"""GetSelectRadioList with radio-type results: items also carry thumbnail/description/mediaid."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetSelectRadioList%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22contentid%22%20val%3D%223%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22startindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2230%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<root>Browse</root>
<browsemode>0</browsemode>
<category isroot="0">Trending</category>
<totallistcount>4</totallistcount>
<startindex>0</startindex>
<listcount>4</listcount>
<timestamp>2018-12-31T16:30:03Z</timestamp>
<menulist>
<menuitem type="2">
<thumbnail>http://cdn-profiles.tunein.com/s297990/images/logot.png</thumbnail>
<description>MSNBC Live with Velshi & Ruhle</description>
<mediaid>s297990</mediaid>
<title>MSNBC</title>
<contentid>0</contentid>
</menuitem>
<menuitem type="2">
<thumbnail>http://cdn-radiotime-logos.tunein.com/s24940t.png</thumbnail>
<description>Amazing music. Played by an amazing line up.</description>
<mediaid>s24940</mediaid>
<title>BBC Radio 2</title>
<contentid>1</contentid>
</menuitem>
<menuitem type="2">
<thumbnail>http://cdn-radiotime-logos.tunein.com/s17077t.png</thumbnail>
<description>Drive with Adrian Durham & Matt Holland</description>
<mediaid>s17077</mediaid>
<title>talkSPORT</title>
<contentid>2</contentid>
</menuitem>
<menuitem type="2">
<thumbnail>http://cdn-radiotime-logos.tunein.com/s24939t.png</thumbnail>
<description>The best new music</description>
<mediaid>s24939</mediaid>
<title>BBC Radio 1</title>
<contentid>3</contentid>
</menuitem>
</menulist>
</response>
</CPM>"""
)
api = _get_api()
items = api.get_select_radio_list(3, 0, 30)
self.assertEqual(len(items), 4)
# Raw '&' in the fixture description survives into the parsed value unchanged.
self.assertEqual(items[0], {
'@type': '2',
'thumbnail': 'http://cdn-profiles.tunein.com/s297990/images/logot.png',
'description': 'MSNBC Live with Velshi & Ruhle',
'mediaid': 's297990',
'title': 'MSNBC',
'contentid': '0',
})
@httpretty.activate(allow_net_connect=False)
def test_get_current_radio_list(self):
"""GetCurrentRadioList returns the menu items of the currently open radio folder."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetCurrentRadioList%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22startindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2230%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<root>Browse</root>
<browsemode>0</browsemode>
<category isroot="0">Trending</category>
<totallistcount>4</totallistcount>
<startindex>0</startindex>
<listcount>4</listcount>
<timestamp>2018-12-31T16:30:03Z</timestamp>
<menulist>
<menuitem type="2">
<thumbnail>http://cdn-profiles.tunein.com/s297990/images/logot.png</thumbnail>
<description>MSNBC Live with Velshi & Ruhle</description>
<mediaid>s297990</mediaid>
<title>MSNBC</title>
<contentid>0</contentid>
</menuitem>
<menuitem type="2">
<thumbnail>http://cdn-radiotime-logos.tunein.com/s24940t.png</thumbnail>
<description>Amazing music. Played by an amazing line up.</description>
<mediaid>s24940</mediaid>
<title>BBC Radio 2</title>
<contentid>1</contentid>
</menuitem>
<menuitem type="2">
<thumbnail>http://cdn-radiotime-logos.tunein.com/s17077t.png</thumbnail>
<description>Drive with Adrian Durham & Matt Holland</description>
<mediaid>s17077</mediaid>
<title>talkSPORT</title>
<contentid>2</contentid>
</menuitem>
<menuitem type="2">
<thumbnail>http://cdn-radiotime-logos.tunein.com/s24939t.png</thumbnail>
<description>The best new music</description>
<mediaid>s24939</mediaid>
<title>BBC Radio 1</title>
<contentid>3</contentid>
</menuitem>
</menulist>
</response>
</CPM>"""
)
api = _get_api()
items = api.get_current_radio_list(0, 30)
self.assertEqual(len(items), 4)
self.assertEqual(items[0], {
'@type': '2',
'thumbnail': 'http://cdn-profiles.tunein.com/s297990/images/logot.png',
'description': 'MSNBC Live with Velshi & Ruhle',
'mediaid': 's297990',
'title': 'MSNBC',
'contentid': '0',
})
@httpretty.activate(allow_net_connect=False)
def test_get_upper_radio_list(self):
"""GetUpperRadioList navigates one level up and returns that folder's menu items."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetUpperRadioList%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22startindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2230%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<root>Browse</root>
<browsemode>0</browsemode>
<category isroot="0">By Language</category>
<totallistcount>4</totallistcount>
<startindex>0</startindex>
<listcount>4</listcount>
<timestamp>2018-12-31T16:23:16Z</timestamp>
<menulist>
<menuitem type="0">
<title>Aboriginal</title>
<contentid>0</contentid>
</menuitem>
<menuitem type="0">
<title>Afrikaans</title>
<contentid>1</contentid>
</menuitem>
<menuitem type="0">
<title>Akan</title>
<contentid>2</contentid>
</menuitem>
<menuitem type="0">
<title>Albanian</title>
<contentid>3</contentid>
</menuitem>
</menulist>
</response>
</CPM>"""
)
api = _get_api()
items = api.get_upper_radio_list(0, 30)
self.assertEqual(len(items), 4)
self.assertEqual(items[0], {
'@type': '0',
'title': 'Aboriginal',
'contentid': '0',
})
@httpretty.activate(allow_net_connect=False)
def test_set_play_select_single(self):
"""SetPlaySelect with one item id sends a single dec 'selectitemid' parameter."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetPlaySelect%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22selectitemid%22%20val%3D%220%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>StopPlaybackEvent</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<playtime>131</playtime>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_play_select('0')
@httpretty.activate(allow_net_connect=False)
def test_set_play_select_multiple(self):
"""SetPlaySelect with a list sends a dec_arr 'selectitemids' with one <item> per id."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetPlaySelect%3C/name%3E%3Cp%20type%3D%22dec_arr%22%20name%3D%22selectitemids%22%20val%3D%22empty%22%3E%3Citem%3E1%3C/item%3E%3Citem%3E2%3C/item%3E%3Citem%3E3%3C/item%3E%3C/p%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>StopPlaybackEvent</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<playtime>131</playtime>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_play_select(['1', '2', '3'])
@httpretty.activate(allow_net_connect=False)
def test_get_station_data(self):
"""GetStationData returns the response payload for one station as a flat dict."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetStationData%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22selectitemid%22%20val%3D%223%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>StationData</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>TuneIn</cpname>
<title>BBC Radio 2</title>
<browsemode>0</browsemode>
<description>Amazing music. Played by an amazing line up.</description>
<thumbnail>http://cdn-radiotime-logos.tunein.com/s24940d.png</thumbnail>
<stationurl>http://opml.radiotime.com/Tune.ashx?id=s24940&partnerId=qDDAbg6M&serial=14BB6E87BBDB&formats=mp3,wma,aac,qt,hls</stationurl>
<timestamp>2019-01-08T15:21:47Z</timestamp>
</response>
</CPM>"""
)
api = _get_api()
station_data = api.get_station_data(3)
# The <cpname>..<timestamp> children map 1:1 onto dict keys; '&' in the
# fixture URL is reproduced verbatim in the parsed value.
self.assertEqual(station_data, {
'cpname': 'TuneIn',
'title': 'BBC Radio 2',
'browsemode': '0',
'description': 'Amazing music. Played by an amazing line up.',
'thumbnail': 'http://cdn-radiotime-logos.tunein.com/s24940d.png',
'stationurl': 'http://opml.radiotime.com/Tune.ashx?id=s24940&partnerId=qDDAbg6M&serial=14BB6E87BBDB&formats=mp3,wma,aac,qt,hls',
'timestamp': '2019-01-08T15:21:47Z',
})
@httpretty.activate(allow_net_connect=False)
def test_get_7band_eq_list(self):
"""Get7BandEQList returns all EQ presets; the index attribute surfaces as '@index'."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGet7BandEQList%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>7BandEQList</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<listcount>5</listcount>
<presetlistcount>4</presetlistcount>
<presetlist>
<preset index="0">
<presetindex>0</presetindex>
<presetname>None</presetname>
</preset>
<preset index="1">
<presetindex>1</presetindex>
<presetname>Pop</presetname>
</preset>
<preset index="2">
<presetindex>2</presetindex>
<presetname>Jazz</presetname>
</preset>
<preset index="3">
<presetindex>3</presetindex>
<presetname>Classic</presetname>
</preset>
<preset index="4">
<presetindex>4</presetindex>
<presetname>customtitle</presetname>
</preset>
</presetlist>
</response>
</UIC>"""
)
api = _get_api()
presets = api.get_7band_eq_list()
self.assertEqual(len(presets), 5)
self.assertEqual(presets[0], {
'@index': '0',
'presetindex': '0',
'presetname': 'None'
})
@httpretty.activate(allow_net_connect=False)
def test_get_current_eq_mode(self):
"""GetCurrentEQMode returns the active preset plus its seven band values as strings."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetCurrentEQMode%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>CurrentEQMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<presetindex>3</presetindex>
<presetname>Classic</presetname>
<eqvalue1>2</eqvalue1>
<eqvalue2>0</eqvalue2>
<eqvalue3>0</eqvalue3>
<eqvalue4>5</eqvalue4>
<eqvalue5>0</eqvalue5>
<eqvalue6>1</eqvalue6>
<eqvalue7>0</eqvalue7>
</response>
</UIC>"""
)
api = _get_api()
equalizer = api.get_current_eq_mode()
self.assertEqual(equalizer, {
'presetindex': '3',
'presetname': 'Classic',
'eqvalue1': '2',
'eqvalue2': '0',
'eqvalue3': '0',
'eqvalue4': '5',
'eqvalue5': '0',
'eqvalue6': '1',
'eqvalue7': '0',
})
@httpretty.activate(allow_net_connect=False)
def test_set_7band_eq_value(self):
"""Set7bandEQValue encodes the preset index and the 7 band values (incl. negatives) as dec params."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESet7bandEQValue%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22presetindex%22%20val%3D%224%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue1%22%20val%3D%221%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue2%22%20val%3D%222%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue3%22%20val%3D%223%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue4%22%20val%3D%224%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue5%22%20val%3D%225%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue6%22%20val%3D%226%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue7%22%20val%3D%22-6%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>7bandEQValue</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<presetindex>4</presetindex>
<eqvalue1>1</eqvalue1>
<eqvalue2>2</eqvalue2>
<eqvalue3>3</eqvalue3>
<eqvalue4>4</eqvalue4>
<eqvalue5>5</eqvalue5>
<eqvalue6>6</eqvalue6>
<eqvalue7>-6</eqvalue7>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_7band_eq_value(4, [1,2,3,4,5,6,-6])
@httpretty.activate(allow_net_connect=False)
def test_set_7band_eq_mode(self):
"""Set7bandEQMode switches to a preset by its index."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESet7bandEQMode%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22presetindex%22%20val%3D%221%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>7bandEQMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<presetindex>1</presetindex>
<presetname>Pop</presetname>
<eqvalue1>0</eqvalue1>
<eqvalue2>-3</eqvalue2>
<eqvalue3>3</eqvalue3>
<eqvalue4>1</eqvalue4>
<eqvalue5>-5</eqvalue5>
<eqvalue6>0</eqvalue6>
<eqvalue7>0</eqvalue7>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_7band_eq_mode(1)
@httpretty.activate(allow_net_connect=False)
def test_reset_7band_eq_value(self):
"""Reset7bandEQValue uses the same parameter layout as Set7bandEQValue."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EReset7bandEQValue%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22presetindex%22%20val%3D%221%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue1%22%20val%3D%221%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue2%22%20val%3D%222%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue3%22%20val%3D%223%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue4%22%20val%3D%224%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue5%22%20val%3D%225%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue6%22%20val%3D%226%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22eqvalue7%22%20val%3D%22-6%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>Reset7bandEQValue</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<presetindex>1</presetindex>
<eqvalue1>1</eqvalue1>
<eqvalue2>2</eqvalue2>
<eqvalue3>3</eqvalue3>
<eqvalue4>4</eqvalue4>
<eqvalue5>5</eqvalue5>
<eqvalue6>6</eqvalue6>
<eqvalue7>-6</eqvalue7>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.reset_7band_eq_value(1, [1,2,3,4,5,6,-6])
@httpretty.activate(allow_net_connect=False)
def test_del_custom_eq_mode(self):
"""DelCustomEQMode removes a custom preset by index."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EDelCustomEQMode%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22presetindex%22%20val%3D%225%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>DelCustomEQMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<presetindex>5</presetindex>
<presetname>Custom 2</presetname>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.del_custom_eq_mode(5)
@httpretty.activate(allow_net_connect=False)
def test_add_custom_eq_mode(self):
"""AddCustomEQMode sends the index as dec and the (space-containing) name as a str param."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EAddCustomEQMode%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22presetindex%22%20val%3D%225%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22presetname%22%20val%3D%22my%20custom%20preset%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>AddCustomEQMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<presetindex>5</presetindex>
<presetname>my custom preset</presetname>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.add_custom_eq_mode(5, 'my custom preset')
@httpretty.activate(allow_net_connect=False)
def test_set_speaker_time(self):
"""SetSpeakerTime expands a datetime into year/month/day/hour/min/sec dec params."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetSpeakerTime%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22year%22%20val%3D%222019%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22month%22%20val%3D%221%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22day%22%20val%3D%226%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22hour%22%20val%3D%2212%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22min%22%20val%3D%2255%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22sec%22%20val%3D%2224%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>SpeakerTime</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<year>2019</year>
<month>1</month>
<day>6</day>
<hour>12</hour>
<min>55</min>
<sec>24</sec>
</response>
</UIC>"""
)
# Local import is only needed by this test; conventionally it would live at
# the top of the module.
import datetime
api = _get_api()
api.set_speaker_time(datetime.datetime(2019, 1, 6, 12, 55, 24))
@httpretty.activate(allow_net_connect=False)
def test_get_sleep_timer(self):
"""GetSleepTimer returns the current sleep option and remaining time."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetSleepTimer%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>SleepTime</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<sleepoption>off</sleepoption>
<sleeptime>0</sleeptime>
</response>
</UIC>"""
)
api = _get_api()
timer = api.get_sleep_timer()
self.assertEqual(timer, {
'sleepoption': 'off',
'sleeptime': '0',
})
@httpretty.activate(allow_net_connect=False)
def test_set_sleep_timer(self):
"""SetSleepTimer sends the option as str and the sleep time (seconds) as dec."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetSleepTimer%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22option%22%20val%3D%22start%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22sleeptime%22%20val%3D%22300%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>SleepTime</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<sleepoption>start</sleepoption>
<sleeptime>300</sleeptime>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_sleep_timer('start', 300)
@httpretty.activate(allow_net_connect=False)
def test_get_alarm_info(self):
"""GetAlarmInfo returns one dict per <alarm>; empty elements parse to None."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetAlarmInfo%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>AllAlarmInfo</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<totalindexcount>2</totalindexcount>
<alarmList>
<alarm index="0">
<hour>13</hour>
<min>27</min>
<week>0x40</week>
<volume>20</volume>
<title />
<description />
<thumbnail />
<stationurl />
<set>on</set>
<soundenable>on</soundenable>
<sound>1</sound>
<alarmsoundname>Disco</alarmsoundname>
<duration>10</duration>
</alarm>
<alarm index="1">
<hour>14</hour>
<min>25</min>
<week>0x28</week>
<volume>6</volume>
<title><![CDATA[MSNBC]]></title>
<description><![CDATA[MSNBC is the premier...]]></description>
<thumbnail />
<stationurl><![CDATA[http://]]></stationurl>
<set>on</set>
<soundenable>off</soundenable>
<sound>-1</sound>
<alarmsoundname />
<duration>0</duration>
</alarm>
</alarmList>
</response>
</UIC>"""
)
api = _get_api()
alarm_info = api.get_alarm_info()
self.assertEqual(len(alarm_info), 2)
# Self-closing tags (<title />, etc.) come back as None, not empty strings.
self.assertEqual(alarm_info[0], {
'@index': '0',
'hour': '13',
'min': '27',
'week': '0x40',
'volume': '20',
'title': None,
'description': None,
'thumbnail': None,
'stationurl': None,
'set': 'on',
'soundenable': 'on',
'sound': '1',
'alarmsoundname': 'Disco',
'duration': '10',
})
@httpretty.activate(allow_net_connect=False)
def test_set_alarm_on_off(self):
"""SetAlarmOnOff toggles a single alarm: dec 'index' plus str 'alarm' on/off."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetAlarmOnOff%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22index%22%20val%3D%220%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22alarm%22%20val%3D%22on%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>AlarmOnOff</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<index>0</index>
<alarm>on</alarm>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_alarm_on_off(0, 'on')
@httpretty.activate(allow_net_connect=False)
def test_get_alarm_sound_list(self):
"""GetAlarmSoundList returns the built-in alarm sounds ('alarsoundindex' sic, per device)."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetAlarmSoundList%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>AlarmSoundList</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<listcount>4</listcount>
<alarmlist>
<alarmsound index="0">
<alarsoundindex>0</alarsoundindex>
<alarmsoundname>Active Morning</alarmsoundname>
</alarmsound>
<alarmsound index="1">
<alarsoundindex>1</alarsoundindex>
<alarmsoundname>Disco</alarmsoundname>
</alarmsound>
<alarmsound index="2">
<alarsoundindex>2</alarsoundindex>
<alarmsoundname>Vintage</alarmsoundname>
</alarmsound>
<alarmsound index="3">
<alarsoundindex>3</alarsoundindex>
<alarmsoundname>Waltz</alarmsoundname>
</alarmsound>
</alarmlist>
</response>
</UIC>"""
)
api = _get_api()
sounds = api.get_alarm_sound_list()
self.assertEqual(len(sounds), 4)
self.assertEqual(sounds[0], {
'@index': '0',
'alarsoundindex': '0',
'alarmsoundname': 'Active Morning',
})
@httpretty.activate(allow_net_connect=False)
def test_set_alarm_info(self):
"""SetAlarmInfo: text fields go as CDATA params; week '0x1C' is lowercased to '0x1c' in the request."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetAlarmInfo%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22index%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22hour%22%20val%3D%2218%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22min%22%20val%3D%2221%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22week%22%20val%3D%220x1c%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22volume%22%20val%3D%222%22/%3E%3Cp%20type%3D%22cdata%22%20name%3D%22title%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5BBBC%20Radio%204%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22cdata%22%20name%3D%22description%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5BIntelligent%20speech%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22cdata%22%20name%3D%22thumbnail%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5Bhttp%3A//cdn-radiotime-logos.tunein.com/s25419d.png%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22cdata%22%20name%3D%22stationurl%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5Bhttp%3A//opml.radiotime.com/Tune.ashx%3Fid%3Ds25419%26partnerId%3DqDDAbg6M%26serial%3D90F1AAD31D82%26formats%3Dmp3%2Cwma%2Caac%2Cqt%2Chls%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22str%22%20name%3D%22soundenable%22%20val%3D%22off%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22sound%22%20val%3D%22-1%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22duration%22%20val%3D%220%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>AlarmInfo</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<index>0</index>
<hour>18</hour>
<min>21</min>
<week>0x1c</week>
<volume>2</volume>
<title><![CDATA[BBC Radio 4]]></title>
<description><![CDATA[Intelligent speech]]></description>
<thumbnail><![CDATA[http://cdn-radiotime-logos.tunein.com/s25419d.png]]></thumbnail>
<stationurl><![CDATA[http://opml.radiotime.com/Tune.ashx?id=s25419&partnerId=qDDAbg6M&serial=90F1AAD31D82&formats=mp3,wma,aac,qt,hls]]></stationurl>
<alarm>on</alarm>
<soundenable>off</soundenable>
<sound>-1</sound>
<duration>0</duration>
</response>
</UIC>"""
)
api = _get_api()
# Keyword arguments mirror the request params; station_data supplies the
# four CDATA fields in one dict.
api.set_alarm_info(
index=0,
hour=18,
minute=21,
week='0x1C',
duration=0,
volume=2,
station_data={
'title': 'BBC Radio 4',
'description': 'Intelligent speech',
'thumbnail': 'http://cdn-radiotime-logos.tunein.com/s25419d.png',
'stationurl': 'http://opml.radiotime.com/Tune.ashx?id=s25419&partnerId=qDDAbg6M&serial=90F1AAD31D82&formats=mp3,wma,aac,qt,hls',
}
)
@httpretty.activate(allow_net_connect=False)
def test_del_alarm(self):
"""DelAlarm sends 'totaldelnum' followed by one repeated 'index' param per alarm."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EDelAlarm%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22totaldelnum%22%20val%3D%224%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22index%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22index%22%20val%3D%221%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22index%22%20val%3D%222%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22index%22%20val%3D%224%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>DelAlarm</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier />
<response result="ok">
<index>0</index>
<index>1</index>
<index>2</index>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.del_alarm([0, 1, 2, 4])
@unittest.skip('API call doesn\'t give any response')
def test_spk_in_group(self):
api = SamsungMultiroomApi(ip, 55001)
api.spk_in_group('select')
@httpretty.activate(allow_net_connect=False)
def test_set_multispk_group(self):
"""SetMultispkGroup: first speaker becomes the 'main' audio source, the rest become sub-speakers."""
# Request encodes: group name (cdata), main speaker name/mac, spknum=3,
# then subspkip/subspkmacaddr pairs for the two non-main speakers.
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetMultispkGroup%3C/name%3E%3Cp%20type%3D%22cdata%22%20name%3D%22name%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5BTest%20group%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22dec%22%20name%3D%22index%22%20val%3D%221%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22type%22%20val%3D%22main%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22spknum%22%20val%3D%223%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22audiosourcemacaddr%22%20val%3D%2200%3A00%3A00%3A00%3A00%3A00%22/%3E%3Cp%20type%3D%22cdata%22%20name%3D%22audiosourcename%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5BLiving%20Room%5D%5D%3E%3C/p%3E%3Cp%20type%3D%22str%22%20name%3D%22audiosourcetype%22%20val%3D%22speaker%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22subspkip%22%20val%3D%22192.168.1.165%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22subspkmacaddr%22%20val%3D%2211%3A11%3A11%3A11%3A11%3A11%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22subspkip%22%20val%3D%22192.168.1.216%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22subspkmacaddr%22%20val%3D%2222%3A22%3A22%3A22%3A22%3A22%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>MultispkGroupStartEvent</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<groupname><![CDATA[Test group]]></groupname>
<grouptype>M</grouptype>
</response>
</UIC>"""
)
api = _get_api()
api.set_multispk_group('Test group', [
{
'name': 'Living Room',
'ip': '192.168.1.129',
'mac': '00:00:00:00:00:00',
},
{
'name': 'Kitchen',
'ip': '192.168.1.165',
'mac': '11:11:11:11:11:11',
},
{
'name': 'Bedroom',
'ip': '192.168.1.216',
'mac': '22:22:22:22:22:22',
}
])
@httpretty.activate(allow_net_connect=False)
def test_set_ungroup(self):
"""SetUngroup takes no parameters and dissolves the speaker group."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetUngroup%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>Ungroup</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok" />
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_ungroup()
@httpretty.activate(allow_net_connect=False)
def test_set_group_name(self):
"""SetGroupName sends the new name as a CDATA parameter."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetGroupName%3C/name%3E%3Cp%20type%3D%22cdata%22%20name%3D%22groupname%22%20val%3D%22empty%22%3E%3C%21%5BCDATA%5BUpdated%20group%20name%5D%5D%3E%3C/p%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>GroupName</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<groupname><![CDATA[Updated group name]]></groupname>
</response>
</UIC>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_group_name('Updated group name')
@httpretty.activate(allow_net_connect=False)
def test_get_cp_list(self):
"""GetCpList returns content providers; signed-in entries additionally carry 'username'."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetCpList%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22liststartindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2230%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>CpList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<listtotalcount>24</listtotalcount>
<liststartindex>0</liststartindex>
<listcount>24</listcount>
<cplist>
<cp>
<cpid>0</cpid>
<cpname>Pandora</cpname>
<signinstatus>0</signinstatus>
</cp>
<cp>
<cpid>1</cpid>
<cpname>Spotify</cpname>
<signinstatus>0</signinstatus>
</cp>
<cp>
<cpid>2</cpid>
<cpname>Deezer</cpname>
<signinstatus>1</signinstatus>
<username>test_username</username>
</cp>
</cplist>
</response>
</CPM>"""
)
api = _get_api()
cps = api.get_cp_list(0, 30)
# The fixture contains 3 <cp> entries even though listcount claims 24.
self.assertEqual(len(cps), 3)
self.assertEqual(cps[0], {
'cpid': '0',
'cpname': 'Pandora',
'signinstatus': '0',
})
@httpretty.activate(allow_net_connect=False)
def test_set_cp_service(self):
"""SetCpService switches the active content provider by its cpservice_id."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetCpService%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22cpservice_id%22%20val%3D%222%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="utf-8" ?>
<CPM>
<method>CpChanged</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
</response>
</CPM>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_cp_service(2)
@httpretty.activate(allow_net_connect=False)
def test_get_cp_info(self):
"""GetCpInfo: nested <audioinfo> parses to a nested dict; empty <category /> to None."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetCpInfo%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>CpInfo</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
<timestamp>2019-01-14T09:50:46Z</timestamp>
<category />
<signinstatus>1</signinstatus>
<username>test_username</username>
<subscription_info>Listening is limited to 30-second clips. Subscribe to enjoy unlimited music!</subscription_info>
<audioinfo>
<title>Introduction And Yaqui Indian Folk Song</title>
<streamtype>station</streamtype>
<thumbnail>https://e-cdns-images.dzcdn.net/images/cover/a9b4964ab775575efa2719827b9e88b9/500x500-000000-80-0-0.jpg</thumbnail>
<playstatus>play</playstatus>
</audioinfo>
</response>
</CPM>"""
)
api = _get_api()
cp_info = api.get_cp_info()
self.assertEqual(cp_info, {
'cpname': 'Deezer',
'timestamp': '2019-01-14T09:50:46Z',
'category': None,
'signinstatus': '1',
'username': 'test_username',
'subscription_info': 'Listening is limited to 30-second clips. Subscribe to enjoy unlimited music!',
'audioinfo': {
'title': 'Introduction And Yaqui Indian Folk Song',
'streamtype': 'station',
'thumbnail': 'https://e-cdns-images.dzcdn.net/images/cover/a9b4964ab775575efa2719827b9e88b9/500x500-000000-80-0-0.jpg',
'playstatus': 'play',
},
})
@httpretty.activate(allow_net_connect=False)
def test_set_sign_in(self):
"""SetSignIn sends username and password as plain str params (note: credentials go over HTTP)."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetSignIn%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22username%22%20val%3D%22test_username%22/%3E%3Cp%20type%3D%22str%22%20name%3D%22password%22%20val%3D%22test_password%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>SignInStatus</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
<timestamp>2019-01-14T10:09:49Z</timestamp>
<category isroot="1" />
<category_localized />
<signinstatus>1</signinstatus>
<root>Playlist Picks</root>
<root_index>2</root_index>
<root_localized>Playlist Picks</root_localized>
</response>
</CPM>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_sign_in('test_username', 'test_password')
@httpretty.activate(allow_net_connect=False)
def test_set_sign_out(self):
"""SetSignOut takes no parameters and signs out of the current content provider."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetSignOut%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>SignOutStatus</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
<timestamp>2019-01-14T10:17:05Z</timestamp>
<category isroot="1" />
<category_localized />
<signoutstatus>1</signoutstatus>
</response>
</CPM>"""
)
api = _get_api()
# No assertion needed: with allow_net_connect=False, an unmatched request raises.
api.set_sign_out()
@httpretty.activate(allow_net_connect=False)
def test_get_cp_submenu(self):
"""GetCpSubmenu: each <submenuitem>'s id attribute surfaces as '@id'; CDATA text is unwrapped."""
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetCpSubmenu%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>SubMenu</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
<timestamp>2019-01-14T10:23:16Z</timestamp>
<totallistcount>10</totallistcount>
<submenu selected_id="0">
<submenuitem id="0">
<submenuitem_localized><![CDATA[Flow]]></submenuitem_localized>
</submenuitem>
<submenuitem id="1">
<submenuitem_localized><![CDATA[Browse]]></submenuitem_localized>
</submenuitem>
<submenuitem id="2">
<submenuitem_localized><![CDATA[Playlist Picks]]></submenuitem_localized>
</submenuitem>
</submenu>
</response>
</CPM>"""
)
api = _get_api()
submenu = api.get_cp_submenu()
self.assertEqual(len(submenu), 3)
self.assertEqual(submenu[0], {
'@id': '0',
'submenuitem_localized': 'Flow',
})
@httpretty.activate(allow_net_connect=False)
def test_set_select_cp_submenu(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetSelectCpSubmenu%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22contentid%22%20val%3D%221%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22startindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2210%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
<timestamp>2019-01-14T10:40:56Z</timestamp>
<root>Browse</root>
<root_index>1</root_index>
<root_localized>Browse</root_localized>
<category isroot="1">Genres</category>
<category_localized>Genres</category_localized>
<totallistcount>23</totallistcount>
<startindex>0</startindex>
<listcount>10</listcount>
<menulist>
<menuitem type="0">
<title>All</title>
<contentid>0</contentid>
</menuitem>
<menuitem type="0">
<title>Pop</title>
<contentid>1</contentid>
</menuitem>
<menuitem type="0">
<title>Rap/Hip Hop</title>
<contentid>2</contentid>
</menuitem>
<menuitem type="0">
<title>Rock</title>
<contentid>3</contentid>
</menuitem>
<menuitem type="0">
<title>Dance</title>
<contentid>4</contentid>
</menuitem>
<menuitem type="0">
<title>R&B</title>
<contentid>5</contentid>
</menuitem>
<menuitem type="0">
<title>Alternative</title>
<contentid>6</contentid>
</menuitem>
<menuitem type="0">
<title>Electro</title>
<contentid>7</contentid>
</menuitem>
<menuitem type="0">
<title>Folk</title>
<contentid>8</contentid>
</menuitem>
<menuitem type="0">
<title>Reggae</title>
<contentid>9</contentid>
</menuitem>
</menulist>
</response>
</CPM>"""
)
api = _get_api()
submenu = api.set_select_cp_submenu(1, 0, 10)
self.assertEqual(len(submenu), 10)
self.assertEqual(submenu[0], {
'@type': '0',
'title': 'All',
'contentid': '0',
})
@httpretty.activate(allow_net_connect=False)
def test_get_cp_player_playlist(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3EGetCpPlayerPlaylist%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22startindex%22%20val%3D%220%22/%3E%3Cp%20type%3D%22dec%22%20name%3D%22listcount%22%20val%3D%2230%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>RadioPlayList</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
<timestamp>2019-01-14T11:10:39Z</timestamp>
<root>Playlist Picks</root>
<root_index>2</root_index>
<root_localized>Playlist Picks</root_localized>
<category isroot="0">Playlist</category>
<category_localized>Playlist</category_localized>
<totallistcount>3</totallistcount>
<startindex>0</startindex>
<listcount>3</listcount>
<menulist>
<menuitem type="1" available="1" currentplaying="1">
<artist>Madeleine Peyroux</artist>
<album>Careless Love</album>
<mediaid>881851</mediaid>
<tracklength>0</tracklength>
<title>Don't Wait Too Long</title>
<contentid>0</contentid>
<thumbnail>http://api.deezer.com/album/100127/image</thumbnail>
</menuitem>
<menuitem type="1" available="1">
<artist>Marcus Strickland's Twi-Life</artist>
<album>Nihil Novi</album>
<mediaid>122883722</mediaid>
<tracklength>0</tracklength>
<title>Cycle</title>
<contentid>1</contentid>
<thumbnail>http://api.deezer.com/album/12864776/image</thumbnail>
</menuitem>
<menuitem type="1" available="1">
<artist>Bill Evans Trio</artist>
<album>Everybody Digs Bill Evans (Remastered)</album>
<mediaid>4156086</mediaid>
<tracklength>0</tracklength>
<title>What Is There To Say? (Album Version)</title>
<contentid>2</contentid>
<thumbnail>http://api.deezer.com/album/387401/image</thumbnail>
</menuitem>
</menulist>
</response>
</CPM>"""
)
api = _get_api()
playlist = api.get_cp_player_playlist(0, 30)
self.assertEqual(len(playlist), 3)
self.assertEqual(playlist[0], {
'@type': '1',
'@available': '1',
'@currentplaying': '1',
'artist': 'Madeleine Peyroux',
'album': 'Careless Love',
'mediaid': '881851',
'tracklength': '0',
'title': 'Don\'t Wait Too Long',
'contentid': '0',
'thumbnail': 'http://api.deezer.com/album/100127/image',
})
@httpretty.activate(allow_net_connect=False)
def test_set_skip_current_track(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetSkipCurrentTrack%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<CPM>
<method>SkipInfo</method>
<version>0.1</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>407c385a-17ef-11e9-b3ee-48e244f52360</user_identifier>
<response result="ok">
<cpname>Deezer</cpname>
<timestamp>2019-01-14T11:21:25Z</timestamp>
<category isroot="1" />
<category_localized />
<skipstatus>1</skipstatus>
<root>Flow</root>
<root_index>0</root_index>
<root_localized>Flow</root_localized>
</response>
</CPM>"""
)
api = _get_api()
api.set_skip_current_track()
@httpretty.activate(allow_net_connect=False)
def test_get_current_play_time(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetCurrentPlayTime%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>MusicPlayTime</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<timelength>168</timelength>
<playtime>121</playtime>
</response>
</UIC>"""
)
api = _get_api()
play_time = api.get_current_play_time()
self.assertEqual(play_time, {
'timelength': '168',
'playtime': '121',
})
@httpretty.activate(allow_net_connect=False)
def test_set_play_cp_playlist_track(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/CPM?cmd=%3Cname%3ESetPlayCpPlaylistTrack%3C/name%3E%3Cp%20type%3D%22dec%22%20name%3D%22selectitemid%22%20val%3D%220%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>StopPlaybackEvent</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<playtime>3</playtime>
</response>
</UIC>"""
)
api = _get_api()
api.set_play_cp_playlist_track(0)
@httpretty.activate(allow_net_connect=False)
def test_get_repeat_mode(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3EGetRepeatMode%3C/name%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>RepeatMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<repeat>off</repeat>
</response>
</UIC>"""
)
api = _get_api()
repeat_mode = api.get_repeat_mode()
self.assertEqual(repeat_mode, 'off')
@httpretty.activate(allow_net_connect=False)
def test_set_repeat_mode(self):
httpretty.register_uri(
httpretty.GET,
'http://192.168.1.129:55001/UIC?cmd=%3Cname%3ESetRepeatMode%3C/name%3E%3Cp%20type%3D%22str%22%20name%3D%22repeatmode%22%20val%3D%22one%22/%3E',
match_querystring=True,
body="""<?xml version="1.0" encoding="UTF-8"?>
<UIC>
<method>RepeatMode</method>
<version>1.0</version>
<speakerip>192.168.1.129</speakerip>
<user_identifier>public</user_identifier>
<response result="ok">
<repeat>one</repeat>
</response>
</UIC>"""
)
api = _get_api()
api.set_repeat_mode('one')
| 46.294468
| 1,259
| 0.489282
| 10,580
| 108,792
| 4.930624
| 0.081474
| 0.011655
| 0.020128
| 0.025496
| 0.79316
| 0.758923
| 0.726834
| 0.701185
| 0.687919
| 0.630391
| 0
| 0.115516
| 0.377905
| 108,792
| 2,349
| 1,260
| 46.314176
| 0.65527
| 0
| 0
| 0.652275
| 0
| 0.038533
| 0.743636
| 0.166526
| 0
| 0
| 0.000186
| 0
| 0.029712
| 1
| 0.032962
| false
| 0.000929
| 0.006035
| 0.000464
| 0.039926
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4252a73ac5bfc9f57f305e588d8bf00c7b953753
| 68
|
py
|
Python
|
lib/__init__.py
|
harshareddy794/web-scanner
|
c71157991810d1288029705a3dfec17fcb869230
|
[
"Apache-2.0"
] | null | null | null |
lib/__init__.py
|
harshareddy794/web-scanner
|
c71157991810d1288029705a3dfec17fcb869230
|
[
"Apache-2.0"
] | null | null | null |
lib/__init__.py
|
harshareddy794/web-scanner
|
c71157991810d1288029705a3dfec17fcb869230
|
[
"Apache-2.0"
] | null | null | null |
from lib.port_scanner import scanner
from lib.spyder import crawler
| 34
| 37
| 0.852941
| 11
| 68
| 5.181818
| 0.636364
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 2
| 38
| 34
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4253eabb486b805e15d3e45d643290e0bbeba860
| 57
|
py
|
Python
|
tests/test_import.py
|
DocLM/pynonymizer
|
1ab2b6323a2b7324fef3a4224231329936a2356f
|
[
"MIT"
] | 40
|
2020-10-19T14:08:05.000Z
|
2021-11-19T10:44:52.000Z
|
tests/test_import.py
|
DocLM/pynonymizer
|
1ab2b6323a2b7324fef3a4224231329936a2356f
|
[
"MIT"
] | 51
|
2020-09-21T19:59:03.000Z
|
2021-11-12T09:19:00.000Z
|
tests/test_import.py
|
DocLM/pynonymizer
|
1ab2b6323a2b7324fef3a4224231329936a2356f
|
[
"MIT"
] | 19
|
2020-10-20T13:18:41.000Z
|
2021-11-11T13:22:00.000Z
|
def test_main_imports():
from pynonymizer import run
| 19
| 31
| 0.77193
| 8
| 57
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175439
| 57
| 2
| 32
| 28.5
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c40f6dd663fc398171fee772ad854c042a3bc538
| 3,490
|
py
|
Python
|
asyncapi_schema_pydantic/v2_3_0/web_sockets_bindings.py
|
albertnadal/asyncapi-schema-pydantic
|
83966bdc11f2d465a10b52cec5ff79d18fa6f5fe
|
[
"MIT"
] | null | null | null |
asyncapi_schema_pydantic/v2_3_0/web_sockets_bindings.py
|
albertnadal/asyncapi-schema-pydantic
|
83966bdc11f2d465a10b52cec5ff79d18fa6f5fe
|
[
"MIT"
] | null | null | null |
asyncapi_schema_pydantic/v2_3_0/web_sockets_bindings.py
|
albertnadal/asyncapi-schema-pydantic
|
83966bdc11f2d465a10b52cec5ff79d18fa6f5fe
|
[
"MIT"
] | null | null | null |
from typing import Optional
from enum import Enum
from pydantic import BaseModel, Extra
from .schema import Schema
class WebSocketsMethod(str, Enum):
get = 'GET'
post = 'POST'
class WebSocketsChannelBinding(BaseModel):
"""
When using WebSockets, the channel represents the connection. Unlike other
protocols that support multiple virtual channels (topics, routing keys, etc.)
per connection, WebSockets doesn't support virtual channels or, put it another
way, there's only one channel and its characteristics are strongly related to
the protocol used for the handshake, i.e., HTTP.
"""
method: Optional[WebSocketsMethod] = None
"""
The HTTP method to use when establishing the connection. Its value MUST be
either GET or POST.
"""
query: Optional[Schema] = None
"""
A Schema object containing the definitions for each query parameter. This
schema MUST be of type object and have a properties key.
"""
headers: Optional[Schema] = None
"""
A Schema object containing the definitions of the HTTP headers to use when
establishing the connection. This schema MUST be of type object and have a
properties key.
"""
bindingVersion: Optional[str] = None
"""
The version of this binding. If omitted, "latest" MUST be assumed.
"""
class WebSocketsMessageBinding(BaseModel):
"""
This document defines how to describe WebSockets-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class WebSocketsOperationBinding(BaseModel):
"""
This document defines how to describe WebSockets-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class WebSocketsServerBinding(BaseModel):
"""
This document defines how to describe WebSockets-specific information on AsyncAPI.
This object MUST NOT contain any properties. Its name is reserved for future use.
"""
class Config:
extra = Extra.forbid
class WebSocketsChannelBinding(BaseModel):
"""
This document defines how to describe WebSockets-specific information on AsyncAPI.
When using WebSockets, the channel represents the connection. Unlike other protocols
that support multiple virtual channels (topics, routing keys, etc.) per connection,
WebSockets doesn't support virtual channels or, put it another way, there's only one
channel and its characteristics are strongly related to the protocol used for the
handshake, i.e., HTTP.
"""
method: Optional[WebSocketsMethod] = None
"""
The HTTP method to use when establishing the connection.
Its value MUST be either GET or POST.
"""
query: Optional[Schema] = None
"""
A Schema object containing the definitions for each query parameter.
This schema MUST be of type object and have a properties key.
"""
headers: Optional[Schema] = None
"""
A Schema object containing the definitions of the HTTP headers to use
when establishing the connection. This schema MUST be of type object
and have a properties key.
"""
bindingVersion: Optional[str] = None
"""
The version of this binding. If omitted, "latest" MUST be assumed.
"""
class Config:
extra = Extra.forbid
| 29.576271
| 88
| 0.705731
| 442
| 3,490
| 5.572398
| 0.246606
| 0.019488
| 0.014616
| 0.034105
| 0.88916
| 0.880227
| 0.880227
| 0.880227
| 0.880227
| 0.880227
| 0
| 0
| 0.231519
| 3,490
| 117
| 89
| 29.82906
| 0.918345
| 0.372206
| 0
| 0.642857
| 0
| 0
| 0.007028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c41e02d072565bccfb2d540639fe976b16426bd2
| 3,248
|
py
|
Python
|
validation.py
|
RoRyou/lost_
|
c0fd3be6808edb126974f606285e15332849f8be
|
[
"Apache-2.0"
] | null | null | null |
validation.py
|
RoRyou/lost_
|
c0fd3be6808edb126974f606285e15332849f8be
|
[
"Apache-2.0"
] | null | null | null |
validation.py
|
RoRyou/lost_
|
c0fd3be6808edb126974f606285e15332849f8be
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
df5 = pd.read_csv('D:/data/final_data_5.csv')
df6 = pd.read_csv('D:/data/final_data_6.csv')
df7 = pd.read_csv('D:/data/final_data_7.csv')
df8 = pd.read_csv('D:/data/final_data_8.csv')
df9 = pd.read_csv('D:/data/final_data_9.csv')
df10 = pd.read_csv('D:/data/final_data_10.csv')
dict = {}
for key in df10['LOST'].tolist():
dict[key] = dict.get(key, 0) + 1
print('A5服务器总流失率')
print("%.2f%%" %(dict[1]/(dict[0]+dict[1])))
# print('------------')
# ACT_lists=['isparty', 'isXMSL','isLYQ','isKTT','isXMHJ','isSYZC','isPTY','isFBJL','isFBRH','dyLMZ', 'dyXMSL', 'dyLYQ', 'dyKTT', 'dyXMHJ', 'dySYZC', 'dyPTY', 'dyFBJL', 'dyFBRH', 'dybanggong','dyfee', 'isfee', 'dyForge_time', 'dyrate', 'dykilltimes','dykilledtimes']
# for ACT in ACT_lists:
# dict = {}
# for key in df5[df5[ACT] == 0]['LOST'].tolist():
# dict[key] = dict.get(key, 0) + 1
# if dict[1] == dict[0] + dict[1]:
# print('所有人都参与了该活动')
# else:
# print('未玩过', ACT, '该活动流失率')
# print("%.2f%%" %(dict[1]/(dict[0]+dict[1])))
#
# dict = {}
# for key in df5[df5[ACT] == 1]['LOST'].tolist():
# dict[key] = dict.get(key, 0) + 1
# if dict[1] == dict[0] + dict[1]:
# print('所有人都参与了该活动')
# else:
# print('玩过',ACT,'该活动流失率')
# print("%.2f%%" %(dict[1]/(dict[0]+dict[1])))
# print('------------')
#
# print('------------')
# ACT_lists=['isparty', 'dyLMZ', 'dyXMSL', 'isXMSL', 'dyLYQ','isLYQ', 'dyKTT', 'isKTT', 'dyXMHJ', 'isXMHJ', 'dySYZC', 'isSYZC','dyPTY', 'isPTY', 'dyFBJL', 'isFBJL', 'dyFBRH', 'isFBRH', 'dybanggong','dyfee', 'isfee', 'dyForge_time', 'dyrate', 'dykilltimes','dykilledtimes']
# for ACT in ACT_lists:
# dict = {}
# for key in df6[df6[ACT] == 0]['LOST'].tolist():
# dict[key] = dict.get(key, 0) + 1
# if dict[1] == dict[0] + dict[1]:
# print('所有人都参与了该活动')
# else:
# print('未玩过', ACT, '该活动流失率')
# print(dict[1]/(dict[0]+dict[1]))
#
# dict = {}
# for key in df6[df6[ACT] == 1]['LOST'].tolist():
# dict[key] = dict.get(key, 0) + 1
# if dict[1] == dict[0] + dict[1]:
# print('所有人都参与了该活动')
# else:
# print('玩过',ACT,'该活动流失率')
# print(dict[1]/(dict[0]+dict[1]))
# print('------------')
print('------------')
ACT_lists=['isparty', 'isXMSL','isLYQ', 'isKTT','isXMHJ','isSYZC','isPTY','isFBJL','isFBRH','dyLMZ', 'dyXMSL', 'dyLYQ', 'dyKTT', 'dyXMHJ', 'dySYZC', 'dyPTY', 'dyFBJL', 'dyFBRH', 'dybanggong','dyfee', 'isfee', 'dyForge_time', 'dyrate', 'dykilltimes','dykilledtimes']
for ACT in ACT_lists:
dict = {}
for key in df5[df5[ACT] == 0]['LOST'].tolist():
dict[key] = dict.get(key, 0) + 1
if dict[1] == dict[0] + dict[1]:
print('所有人都参与了该活动')
else:
print('未玩过', ACT, '该活动流失率')
print("%.2f%%" %(dict[1]/(dict[0]+dict[1])))
dict = {}
for key in df5[df5[ACT] == 1]['LOST'].tolist():
dict[key] = dict.get(key, 0) + 1
if dict[1] == dict[0] + dict[1]:
print('所有人都参与了该活动')
else:
print('玩过',ACT,'该活动流失率')
print("%.2f%%" %(dict[1]/(dict[0]+dict[1])))
print('------------')
| 39.13253
| 274
| 0.506773
| 425
| 3,248
| 3.809412
| 0.157647
| 0.080296
| 0.088944
| 0.080296
| 0.885732
| 0.885732
| 0.885732
| 0.793082
| 0.793082
| 0.761581
| 0
| 0.036919
| 0.224446
| 3,248
| 83
| 275
| 39.13253
| 0.605796
| 0.536946
| 0
| 0.53125
| 0
| 0
| 0.296592
| 0.105149
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0.3125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c48168f80c7427a7a5d9e3479c339e3caba1294b
| 56,840
|
py
|
Python
|
nipy/algorithms/statistics/models/tests/test_olsR.py
|
bpinsard/nipy
|
d49e8292adad6619e3dac710752131b567efe90e
|
[
"BSD-3-Clause"
] | 236
|
2015-01-09T21:28:37.000Z
|
2022-03-27T11:51:58.000Z
|
nipy/algorithms/statistics/models/tests/test_olsR.py
|
bpinsard/nipy
|
d49e8292adad6619e3dac710752131b567efe90e
|
[
"BSD-3-Clause"
] | 171
|
2015-03-23T00:31:43.000Z
|
2021-11-22T12:43:00.000Z
|
nipy/algorithms/statistics/models/tests/test_olsR.py
|
bpinsard/nipy
|
d49e8292adad6619e3dac710752131b567efe90e
|
[
"BSD-3-Clause"
] | 94
|
2015-02-01T12:39:47.000Z
|
2022-01-27T06:38:19.000Z
|
from __future__ import absolute_import
import numpy as np
from ..regression import OLSModel
import nipy.testing as niptest
import scipy.stats
from .exampledata import x, y
Rscript = '''
d = read.table('data.csv', header=T, sep=' ')
y.lm = lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14, data=d)
print(summary(y.lm))
y.lm2 = lm(Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 - 1, data=d)
print(summary(y.lm2))
SSE = sum(resid(y.lm)^2)
SST = sum((d$Y - mean(d$Y))^2)
SSR = SST - SSE
print(data.frame(SSE, SST, SSR))
MSE = SSE / y.lm$df.resid
MST = SST / (length(d$Y) - 1)
MSR = SSR / (length(d$Y) - y.lm$df.resid - 1)
print(data.frame(MSE, MST, MSR))
print(AIC(y.lm))
print(AIC(y.lm2))
'''
# lines about "Signif. codes" were deleted due to a character encoding issue
Rresults = \
"""
These are the results from fitting the model in R, i.e. running the commands Rscript in R
A few things to note, X8 is a column of 1s,
so by not including a '-1' in the formula, X8 gets
thrown out of the model, with its coefficients
being the "(Intercept)" term. An alternative is to use "-1"
in the formula, but then R gives nonsensical F, R2 and adjusted R2
values. This means that R2, R2a and F cannot fully be trusted in R.
In OLSModel, we have checked whether a column of 1s is in the column
space, in which case the F, R2, and R2a are seneible.
> source('test.R')
[1] "Without using '-1'"
[1] "------------------"
Call:
lm(formula = Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 +
X10 + X11 + X12 + X13 + X14, data = d)
Residuals:
Min 1Q Median 3Q Max
-2.125783 -0.567850 0.004305 0.532145 2.372263
Coefficients: (1 not defined because of singularities)
Estimate Std. Error t value Pr(>|t|)
(Intercept) 2.603e+02 8.226e-01 316.463 < 2e-16 ***
X1 1.439e-02 2.649e-02 0.543 0.5881
X2 -6.975e+00 1.022e+01 -0.683 0.4963
X3 4.410e+01 5.740e+00 7.682 6.42e-12 ***
X4 3.864e+00 5.770e+00 0.670 0.5044
X5 2.458e+02 4.594e+02 0.535 0.5937
X6 9.789e+02 3.851e+02 2.542 0.0124 *
X7 1.339e+03 8.418e+02 1.591 0.1145
X8 NA NA NA NA
X9 -1.955e-02 1.539e-02 -1.270 0.2066
X10 7.042e-05 2.173e-04 0.324 0.7465
X11 -3.743e-08 6.770e-07 -0.055 0.9560
X12 3.060e-06 2.094e-06 1.461 0.1469
X13 1.440e-06 1.992e-06 0.723 0.4711
X14 -1.044e-05 7.215e-06 -1.448 0.1505
---
Residual standard error: 0.8019 on 112 degrees of freedom
Multiple R-squared: 0.5737,Adjusted R-squared: 0.5242
F-statistic: 11.59 on 13 and 112 DF, p-value: 1.818e-15
[1] "Using '-1'"
[1] "------------------"
Call:
lm(formula = Y ~ X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 +
X10 + X11 + X12 + X13 + X14 - 1, data = d)
Residuals:
Min 1Q Median 3Q Max
-2.125783 -0.567850 0.004305 0.532145 2.372263
Coefficients:
Estimate Std. Error t value Pr(>|t|)
X1 1.439e-02 2.649e-02 0.543 0.5881
X2 -6.975e+00 1.022e+01 -0.683 0.4963
X3 4.410e+01 5.740e+00 7.682 6.42e-12 ***
X4 3.864e+00 5.770e+00 0.670 0.5044
X5 2.458e+02 4.594e+02 0.535 0.5937
X6 9.789e+02 3.851e+02 2.542 0.0124 *
X7 1.339e+03 8.418e+02 1.591 0.1145
X8 2.603e+02 8.226e-01 316.463 < 2e-16 ***
X9 -1.955e-02 1.539e-02 -1.270 0.2066
X10 7.042e-05 2.173e-04 0.324 0.7465
X11 -3.743e-08 6.770e-07 -0.055 0.9560
X12 3.060e-06 2.094e-06 1.461 0.1469
X13 1.440e-06 1.992e-06 0.723 0.4711
X14 -1.044e-05 7.215e-06 -1.448 0.1505
---
Residual standard error: 0.8019 on 112 degrees of freedom
Multiple R-squared: 1,Adjusted R-squared: 1
F-statistic: 9.399e+05 on 14 and 112 DF, p-value: < 2.2e-16
SSE SST SSR
1 72.02328 168.9401 96.91685
MSE MST MSR
1 0.643065 1.351521 7.455142
[1] "AIC"
[1] 317.1017
[1] "BIC"
[1] 359.6459
"""
def test_results():
    """Nose generator test: compare an OLSModel fit against R's printout.

    The expected values are the R ``lm()`` output transcribed verbatim in the
    module docstring above.  Each ``yield`` is a separate nose test case, so
    the order and count of yields is significant.
    """
    # x, y are module-level fixtures (design matrix / response) defined
    # elsewhere in this file; column 8 of x is the all-ones intercept column
    # (see the contrast construction below and R's X8 row).
    m = OLSModel(x)
    r = m.fit(y)
    # results hand compared with R's printout
    yield niptest.assert_equal, '%0.4f' % r.R2, '0.5737'
    yield niptest.assert_equal, '%0.4f' % r.R2_adj, '0.5242'
    f = r.F_overall
    yield niptest.assert_equal, '%0.2f' % f['F'], '11.59'
    yield niptest.assert_equal, f['df_num'], 13
    yield niptest.assert_equal, f['df_den'], 112
    yield niptest.assert_equal, '%0.3e' % f['p_value'], '1.818e-15'
    # test Fcontrast, the 8th column of m.design is all 1s
    # let's construct a contrast matrix that tests everything
    # but column 8 is zero
    M = np.identity(14)
    M = np.array([M[i] for i in [0,1,2,3,4,5,6,8,9,10,11,12,13]])
    Fc = r.Fcontrast(M)
    # the F-contrast over all non-intercept columns must reproduce the
    # overall F statistic and its degrees of freedom
    yield niptest.assert_array_almost_equal, [Fc.F], [f['F']], 6
    yield niptest.assert_array_almost_equal, [Fc.df_num], [f['df_num']], 6
    yield niptest.assert_array_almost_equal, [Fc.df_den], [f['df_den']], 6
    # accumulators for the per-coefficient expected strings parsed below
    thetas = []
    sds = []
    ts = []
    ps = []
    # the model has an intercept
    yield niptest.assert_true, r.model.has_intercept
    # design matrix has full rank
    yield niptest.assert_equal, r.model.rank, 14
    # design matrix has full rank
    yield niptest.assert_equal, r.df_model, 14
    yield niptest.assert_equal, r.df_total, 126
    yield niptest.assert_equal, r.df_resid, 112
    # entries with '*****' are not tested as they were a different format
    # NOTE: the coefficient table appears twice (from R's two fits, with and
    # without '+1'); since r.theta etc. have length 14, each zip() below
    # stops after the first copy.
    resultstr = \
'''
X1 1.439e-02 2.649e-02 0.543 0.5881
X2 -6.975e+00 1.022e+01 -0.683 0.4963
X3 4.410e+01 5.740e+00 7.682 ******
X4 3.864e+00 5.770e+00 0.670 0.5044
X5 2.458e+02 4.594e+02 0.535 0.5937
X6 9.789e+02 3.851e+02 2.542 0.0124
X7 1.339e+03 8.418e+02 1.591 0.1145
X8 2.603e+02 8.226e-01 316.463 ******
X9 -1.955e-02 1.539e-02 -1.270 0.2066
X10 7.042e-05 2.173e-04 0.324 0.7465
X11 -3.743e-08 6.770e-07 -0.055 0.9560
X12 3.060e-06 2.094e-06 1.461 0.1469
X13 1.440e-06 1.992e-06 0.723 0.4711
X14 -1.044e-05 7.215e-06 -1.448 0.1505
X1 1.439e-02 2.649e-02 0.543 0.5881
X2 -6.975e+00 1.022e+01 -0.683 0.4963
X3 4.410e+01 5.740e+00 7.682 ******
X4 3.864e+00 5.770e+00 0.670 0.5044
X5 2.458e+02 4.594e+02 0.535 0.5937
X6 9.789e+02 3.851e+02 2.542 0.0124
X7 1.339e+03 8.418e+02 1.591 0.1145
X8 2.603e+02 8.226e-01 316.463 ******
X9 -1.955e-02 1.539e-02 -1.270 0.2066
X10 7.042e-05 2.173e-04 0.324 0.7465
X11 -3.743e-08 6.770e-07 -0.055 0.9560
X12 3.060e-06 2.094e-06 1.461 0.1469
X13 1.440e-06 1.992e-06 0.723 0.4711
X14 -1.044e-05 7.215e-06 -1.448 0.1505
'''
    # parse each table row into (name, estimate, std err, t, p); the name
    # column is discarded
    for row in resultstr.strip().split('\n'):
        row = row.strip()
        _, th, sd, t, p = row.split()
        thetas.append(th)
        sds.append(sd)
        ts.append(t)
        ps.append(p)
    # estimates: formatted with the same '%0.3e' precision R prints
    for th, thstr in zip(r.theta, thetas):
        yield niptest.assert_equal, '%0.3e' % th, thstr
    # standard errors: sqrt of the diagonal of the coefficient covariance
    for sd, sdstr in zip([np.sqrt(r.vcov(column=i)) for i in range(14)], sds):
        yield niptest.assert_equal, '%0.3e' % sd, sdstr
    # t statistics per coefficient
    for t, tstr in zip([r.t(column=i) for i in range(14)], ts):
        yield niptest.assert_equal, '%0.3f' % t, tstr
    # a unit-vector T contrast on column i must agree with the per-column
    # t statistic, standard error and estimate
    for i, t in enumerate([r.t(column=i) for i in range(14)]):
        m = np.zeros((14,))
        m[i] = 1.
        tv = r.Tcontrast(m)
        e = r.theta[i]
        sd = np.sqrt(r.vcov(column=i))
        yield niptest.assert_almost_equal, tv.t, t, 6
        yield niptest.assert_almost_equal, tv.sd, sd, 6
        yield niptest.assert_almost_equal, tv.effect, e, 6
    # two-sided p-values: 2 * P(T > |t|) with df_resid degrees of freedom;
    # rows whose expected value was printed as '******' are skipped
    for p, pstr in zip([2*scipy.stats.t.sf(np.fabs(r.t(column=i)), r.df_resid) for i in range(14)], ps):
        if pstr.find('*') < 0:
            yield niptest.assert_equal, '%0.4f' % p, pstr
    # sums of squares and mean squares, against the R SSE/SST/SSR printout
    yield niptest.assert_equal, "%0.5f" % r.SSE, "72.02328"
    yield niptest.assert_equal, "%0.4f" % r.SST, "168.9401"
    yield niptest.assert_equal, "%0.5f" % r.SSR, "96.91685"
    yield niptest.assert_equal, "%0.6f" % r.MSE, "0.643065"
    yield niptest.assert_equal, "%0.6f" % r.MST, "1.351521"
    yield niptest.assert_equal, "%0.6f" % r.MSR, "7.455142"
    # residual standard error
    yield niptest.assert_equal, "%0.4f" % np.sqrt(r.MSE), "0.8019"
    # the difference here comes from the fact that
    # we've treated sigma as a nuisance parameter,
    # so our AIC is the AIC of the profiled log-likelihood...
    # (hence the +2 and +log(n) offsets, n = 126 observations)
    yield niptest.assert_equal, '%0.4f'% (r.AIC + 2,), '317.1017'
    yield niptest.assert_equal, '%0.4f'% (r.BIC + np.log(126),), '359.6459'
# this is the file "data.csv" referred to in Rscript above
Rdata = '''
Y X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
2.558020266818153345e+02 -4.423009200784273898e-02 -6.615177603161188392e-03 -2.429792163411158279e-02 4.236447886547620167e-02 1.618533936246031348e-03 -8.683269025079367589e-04 -8.181821468255191711e-04 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.573856564029982792e+02 -1.247753847628743987e-02 8.132393396825286086e-03 -4.413603363412710312e-02 3.174380286547619917e-02 1.507591026246031356e-03 -8.321096135079367661e-04 -5.268108768253958792e-04 1.000000000000000000e+00 2.027260000000000062e+00 4.109783107600000207e+00 8.331598902713176713e+00 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.590080857852332201e+02 -3.265906165554512651e-03 1.963457496825285822e-03 -1.398771363412710383e-02 3.088127086547619998e-02 1.672285950246031301e-03 -8.927174265079367271e-04 -4.244701868253958994e-04 1.000000000000000000e+00 4.054520000000000124e+00 1.643913243040000083e+01 6.665279122170541370e+01 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.607408786477914759e+02 -8.017150588157394330e-04 2.213062996825285525e-03 1.398740365872893493e-03 1.085352386547620146e-02 1.533498042246031435e-03 -7.043727325079367782e-04 -4.042463468253959091e-04 1.000000000000000000e+00 6.081780000000000186e+00 3.698804796840000364e+01 2.249531703732557730e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.611418084786566283e+02 -1.861685769802005528e-04 1.047713639682528591e-02 1.167152736587289547e-02 1.489745686547620102e-02 1.548124779246031315e-03 -5.563730125079367241e-04 -1.481969968253959513e-04 1.000000000000000000e+00 8.109040000000000248e+00 6.575652972160000331e+01 5.332223297736433096e+02 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.625281634787599501e+02 -4.117603177916723598e-05 9.983357396825286167e-03 2.268076636587289252e-02 3.341529466547620009e-02 1.378939226246031274e-03 -5.824833125079368051e-04 -1.637155968253958946e-04 1.000000000000000000e+00 1.013630000000000031e+01 1.027445776899999998e+02 1.041449862839147045e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.600881821274363688e+02 -8.724125662125817594e-06 2.118458339682528432e-02 -3.638986341271063796e-04 7.819901865476201752e-03 1.343526296246031447e-03 -4.266495825079367706e-04 -3.036430682539588335e-05 1.000000000000000000e+00 1.216356000000000037e+01 1.479521918736000146e+02 1.799625362986046184e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.604916986023719687e+02 -1.779095604735100062e-06 2.110365339682528443e-02 -1.333419963412710470e-02 3.556263356547620380e-02 1.176156066246031480e-03 -2.915726925079367704e-04 -1.372058068253959344e-04 1.000000000000000000e+00 1.419082000000000043e+01 2.013793722724000190e+02 2.857738423630619764e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.631421465595319091e+02 -3.505829544571274576e-07 3.057060839682528355e-02 2.450720636587289808e-02 2.371273386547620085e-02 1.109560806246031196e-03 -4.451344925079367475e-04 -4.868320682539588849e-05 1.000000000000000000e+00 1.621808000000000050e+01 2.630261188864000133e+02 4.265778638189146477e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.607404475404462687e+02 -6.698859808659203534e-08 4.212096239682527887e-02 4.201216436587289910e-02 1.535293186547620134e-02 1.200805636246031222e-03 -4.756955025079367830e-04 4.163935317460414412e-05 1.000000000000000000e+00 1.824533999999999878e+01 3.328924317155999688e+02 6.073735600077903655e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.602563139919928403e+02 -1.244731173797263160e-08 3.868433239682528280e-02 3.198940136587289512e-02 1.951312986547620171e-02 1.210561816246031458e-03 -5.037184525079367245e-04 1.853174317460412092e-05 1.000000000000000000e+00 2.027260000000000062e+01 4.109783107599999994e+02 8.331598902713176358e+03 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.597932184819765098e+02 -2.254732652415686417e-09 3.464322639682528016e-02 2.498494136587289804e-02 6.040923865476201249e-03 1.251570966246031346e-03 -3.408492325079367884e-04 -2.053166825395852726e-06 1.000000000000000000e+00 2.229986000000000246e+01 4.972837560196001050e+02 1.108935813951124146e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.600692996257253071e+02 -3.990740854251533582e-10 3.209237439682528781e-02 1.811942636587289546e-02 2.605920586547620307e-02 1.177732906246031254e-03 -5.077881225079367488e-04 5.365363174604119087e-06 1.000000000000000000e+00 2.432712000000000074e+01 5.918087674944000582e+02 1.439700290388836947e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.605557611538409901e+02 -6.912161668563663771e-11 4.299601339682528056e-02 2.895994436587289583e-02 1.417107986547620074e-02 1.265060666246031361e-03 -7.339628625079367124e-04 1.238756831746040893e-04 1.000000000000000000e+00 2.635437999999999903e+01 6.945533451843999728e+02 1.830452278926084546e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.595077391981066626e+02 -1.172812338719269821e-11 3.317149439682529066e-02 1.328090936587289494e-02 1.022893186547620126e-02 1.374031606246031408e-03 -5.220871725079368267e-04 1.413575031746041374e-04 1.000000000000000000e+00 2.838164000000000087e+01 8.055174890896000761e+02 2.286190738904495811e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.587979640652715148e+02 -1.964186707357858839e-12 2.405623739682528558e-02 -1.810522634127103431e-03 1.576445486547620178e-02 1.135956976246031312e-03 -5.014120825079368057e-04 1.611867531746041847e-04 1.000000000000000000e+00 3.040890000000000271e+01 9.247011992100001407e+02 2.811914629665697794e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.596659555937277446e+02 -3.223083090335421760e-13 3.234481339682528100e-02 2.004408536587289763e-02 2.356408786547620204e-02 1.221481986246031413e-03 -6.670757425079366920e-04 1.487958231746040706e-04 1.000000000000000000e+00 3.243616000000000099e+01 1.052104475545600053e+03 3.412622910551317182e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.584320401508220471e+02 -9.003308688382024074e-14 3.619885939682528087e-02 2.789771365872894399e-03 9.189109865476198513e-03 1.135373276246031326e-03 -4.355060825079367357e-04 1.002332231746041503e-04 1.000000000000000000e+00 3.446341999999999928e+01 1.187727318096400040e+03 4.093314540902982844e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.580819809866689525e+02 -3.906123070653587881e-14 3.660551639682528557e-02 -1.860463412710344766e-05 2.714363586547620388e-02 1.120834376246031315e-03 -4.501944025079367639e-04 1.202024331746040682e-04 1.000000000000000000e+00 3.649067999999999756e+01 1.331569726862399875e+03 4.858988480062322924e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.594975650647641601e+02 1.601430181974213516e-14 3.905011839682528962e-02 9.654908365872898190e-03 1.281982286547620267e-02 1.076811816246031270e-03 -6.519448025079367355e-04 1.400206731746040907e-04 1.000000000000000000e+00 3.851794000000000295e+01 1.483631701843600240e+03 5.714643687370968837e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.605247214249999956e+02 9.795389708948228080e-02 3.677422139682529068e-02 2.608958736587289190e-02 2.185457486547620273e-02 1.235064666246031345e-03 -6.071577725079368385e-04 1.763112331746040417e-04 1.000000000000000000e+00 4.054520000000000124e+01 1.643913243039999998e+03 6.665279122170541086e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.615678287570015073e+02 7.636684347997682032e+00 2.837993739682528535e-02 3.336949636587289297e-02 2.712176086547619935e-02 1.121492386246031227e-03 -3.887845825079367800e-04 9.757465317460415049e-05 1.000000000000000000e+00 4.257245999999999952e+01 1.812414350451600058e+03 7.715893743802672543e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.606581922590170848e+02 1.688917484910420086e+01 3.424000439682528540e-02 5.953364365872893665e-03 1.839351286547620187e-02 1.118185646246031353e-03 -3.785339525079367985e-04 2.395393531746040213e-04 1.000000000000000000e+00 4.459972000000000492e+01 1.989135024078400420e+03 8.871486511608993169e+04 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.585156749757550756e+02 1.131722623416632167e+01 3.749442739682529169e-02 -1.501305634127106381e-03 1.711901486547620296e-02 1.368664136246031289e-03 -5.395318625079368116e-04 1.879513531746040403e-04 1.000000000000000000e+00 4.662698000000000320e+01 2.174075263920400175e+03 1.013705638493112347e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.589431610190735000e+02 1.345714208625528263e+00 3.218309039682527850e-02 -7.129233634127103703e-03 2.217183586547620197e-02 1.429032466246031368e-03 -5.373530925079368203e-04 1.592906031746042046e-04 1.000000000000000000e+00 4.865424000000000149e+01 2.367235069977600233e+03 1.151760232311069558e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.588859099636547398e+02 -3.786197907636791982e+00 2.637535539682528754e-02 -1.390411634127106111e-03 1.310852586547620047e-02 1.517677216246031326e-03 -5.291699825079366776e-04 1.052765531746040640e-04 1.000000000000000000e+00 5.068149999999999977e+01 2.568614442250000138e+03 1.301812328548933729e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.584379032107140688e+02 -4.100675927724760861e+00 2.384725139682528430e-02 -1.080336163412710590e-02 -4.173090134523799177e-03 1.358116916246031227e-03 -4.800622625079367331e-04 5.590095317460413646e-05 1.000000000000000000e+00 5.270875999999999806e+01 2.778213380737599891e+03 1.464361823140867637e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.595410206851418025e+02 -2.630373115496683400e+00 1.004822839682528376e-02 9.314062365872892435e-03 -9.878861345237952007e-04 1.325770276246031245e-03 -4.428060525079367620e-04 -2.427069682539584328e-05 1.000000000000000000e+00 5.473602000000000345e+01 2.996031885440400401e+03 1.639908612021034642e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.607257898907158165e+02 -1.286200190109046071e+00 2.464792639682528499e-02 2.035648336587289609e-02 -6.855731345237967012e-04 1.419879466246031343e-03 -6.113658025079368383e-04 1.115435631746041455e-04 1.000000000000000000e+00 5.676328000000000173e+01 3.222069956358400304e+03 1.828952591123596649e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.588783224743828555e+02 -5.223127938147428262e-01 2.786826139682528278e-02 1.117468365872894415e-03 -1.241363713452380002e-02 1.415631896246031260e-03 -4.147048725079367825e-04 -1.723451682539593396e-05 1.000000000000000000e+00 5.879054000000000002e+01 3.456327593491600055e+03 2.031993656382716435e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.588356428472260973e+02 -1.842356108573543483e-01 2.425059939682528559e-02 -4.276288634127104610e-03 -1.091986813452380106e-02 1.392750786246031280e-03 -4.490394525079367555e-04 -1.003586682539589405e-05 1.000000000000000000e+00 6.081780000000000541e+01 3.698804796840000563e+03 2.249531703732558235e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.597484695395635299e+02 -5.807770807625862314e-02 1.325085839682528521e-02 -3.310795634127106785e-03 2.611598386547619999e-02 1.344393666246031368e-03 -5.894356525079367040e-04 -4.194197682539594491e-05 1.000000000000000000e+00 6.284506000000000370e+01 3.949501566403600464e+03 2.482066629107282788e+05 -0.000000000000000000e+00 -0.000000000000000000e+00 -0.000000000000000000e+00
2.586971711680070598e+02 -1.669108953593786623e-02 1.520021739682528641e-02 -6.521448634127104127e-03 1.323596186547620207e-02 1.018124536246031329e-03 -5.651434125079368188e-04 -1.186629568253958888e-04 1.000000000000000000e+00 6.487232000000000198e+01 4.208417902182400212e+03 2.730098328441053745e+05 6.637850845511725772e-01 -0.000000000000000000e+00 -0.000000000000000000e+00
2.578038305276642745e+02 -4.438179736810902651e-03 1.418104939682528556e-02 -1.458225563412710556e-02 2.076608686547620070e-02 7.166574462460313308e-04 -6.010164225079367385e-04 -2.031235568253959454e-04 1.000000000000000000e+00 6.689958000000000027e+01 4.475553804176400263e+03 2.994126697668033885e+05 2.437840493460591773e+01 -0.000000000000000000e+00 -0.000000000000000000e+00
2.575900012845362994e+02 -1.104415351769467155e-03 1.171448539682528461e-02 -6.411356341271060022e-04 2.179420786547620059e-02 7.711998362460313790e-04 -5.958785525079367436e-04 -1.778974268253958766e-04 1.000000000000000000e+00 6.892683999999999855e+01 4.750909272385600161e+03 3.274651632722386275e+05 1.195928942034693989e+02 -0.000000000000000000e+00 -0.000000000000000000e+00
2.592522649854503811e+02 -2.595046810460775255e-04 5.653468396825284473e-03 -3.306909634127105230e-03 3.415740386547620050e-02 7.991702162460313699e-04 -5.105784425079367903e-04 -2.023469768253959109e-04 1.000000000000000000e+00 7.095409999999999684e+01 5.034484306809999907e+03 3.572173029538273695e+05 3.362968463074205374e+02 -0.000000000000000000e+00 -0.000000000000000000e+00
2.579003985729477790e+02 -5.799523371039054791e-05 4.075954396825285861e-03 -5.813851634127106816e-03 3.851734186547620120e-02 8.126851062460313437e-04 -4.455600825079367448e-04 -3.203095468253959032e-04 1.000000000000000000e+00 7.298135999999999513e+01 5.326278907449599501e+03 3.887190784049858339e+05 7.244798546627382620e+02 -0.000000000000000000e+00 -0.000000000000000000e+00
2.577110562270163996e+02 -1.240153176296573148e-05 1.982903996825284912e-03 -5.751847634127105896e-03 1.817295686547620165e-02 6.980794162460313449e-04 -3.607846825079367298e-04 -3.361090868253959027e-04 1.000000000000000000e+00 7.500862000000000762e+01 5.626293074304400761e+03 4.220204792191306478e+05 1.334131512685706639e+03 -0.000000000000000000e+00 -0.000000000000000000e+00
2.593279674352351662e+02 -2.549813093163372416e-06 2.012354196825284422e-03 -3.176191634127106811e-03 2.634695186547620152e-02 5.562481362460312394e-04 -4.909143225079367614e-04 -2.835488168253958450e-04 1.000000000000000000e+00 7.703588000000000591e+01 5.934526807374400960e+03 4.571714949896775070e+05 2.215241413792596632e+03 -0.000000000000000000e+00 -0.000000000000000000e+00
2.604506571831263386e+02 -5.061830223553558920e-07 3.248753396825284495e-03 5.653695365872894729e-03 3.363641326547620047e-02 4.461581362460312686e-04 -5.631164925079367844e-04 -1.737951468253959427e-04 1.000000000000000000e+00 7.906314000000000419e+01 6.250980106659601006e+03 4.942221153100429801e+05 3.417799151399689890e+03 -0.000000000000000000e+00 -0.000000000000000000e+00
2.602953723174513812e+02 -9.736609629218024716e-08 6.825325968252849568e-04 1.423937136587289515e-02 3.023103586547620097e-02 7.006392762460313377e-04 -5.004090925079366942e-04 -1.539339168253958537e-04 1.000000000000000000e+00 8.109040000000000248e+01 6.575652972159999990e+03 5.332223297736432869e+05 4.991794318923266474e+03 -0.000000000000000000e+00 -0.000000000000000000e+00
2.606369817776421769e+02 -1.820092841812642467e-08 -1.136286590317471534e-02 3.619031336587289621e-02 1.424289986547620096e-02 5.533487362460313193e-04 -4.338583525079367596e-04 -1.890155468253958962e-04 1.000000000000000000e+00 8.311766000000000076e+01 6.908545403875599732e+03 5.742221279738948215e+05 6.987216509779604166e+03 -0.000000000000000000e+00 -0.000000000000000000e+00
2.593342616024719973e+02 -3.315296284192901071e-09 -5.857725263174714918e-03 2.357598136587289728e-02 1.897169486547620187e-02 7.518108062460313089e-04 -5.384554125079367383e-04 -1.363035768253958785e-04 1.000000000000000000e+00 8.514491999999999905e+01 7.249657401806400230e+03 6.172714995042138034e+05 9.454055317384982118e+03 -0.000000000000000000e+00 -0.000000000000000000e+00
2.586453520651357962e+02 -5.897348231645986935e-10 1.111030896825284872e-03 2.246285136587289344e-02 2.219625186547620130e-02 6.593569362460313795e-04 -4.778790125079367536e-04 -7.630101682539586726e-05 1.000000000000000000e+00 8.717217999999999734e+01 7.598988965952399667e+03 6.624204339580164524e+05 1.244230033515567993e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.590023951682784400e+02 -1.026285419063397840e-10 1.848365996825284893e-03 1.420209336587289345e-02 2.652135286547620263e-02 9.330586362460312937e-04 -5.569034125079367487e-04 -8.223069682539586433e-05 1.000000000000000000e+00 8.919944000000000983e+01 7.956540096313601680e+03 7.097189209287194535e+05 1.600194115650800268e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.602716614134758402e+02 -1.749597348817579870e-11 -4.677688603174715194e-03 1.815530536587289800e-02 5.745579865476198311e-03 6.605902962460313572e-04 -5.903785325079367440e-04 -1.106166468253958835e-04 1.000000000000000000e+00 9.122670000000000812e+01 8.322310792890000812e+03 7.592169500097383279e+05 2.018296737485818085e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.604482429819940421e+02 -2.915444713749153634e-12 9.829689682528536254e-05 1.461135536587289396e-02 1.032855886547619922e-02 6.060708362460314087e-04 -5.028199025079367092e-04 9.170133174604125012e-06 1.000000000000000000e+00 9.325396000000000640e+01 8.696301055681600701e+03 8.109645107944898773e+05 2.503536858362251587e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.603872844034092395e+02 -4.662988791519401875e-13 -8.091808403174714781e-03 2.668391636587289645e-02 9.499642865476200237e-03 6.190488562460314068e-04 -5.573827825079367406e-04 -1.419941268253958845e-04 1.000000000000000000e+00 9.528122000000000469e+01 9.078510884688401347e+03 8.650115928763902048e+05 3.060913437621728735e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.604142717232071504e+02 -8.815554369269722195e-14 7.379531968252847629e-04 1.966617536587289550e-02 5.218423865476204404e-03 7.821939762460313177e-04 -6.720836925079368140e-04 -1.368856682539584639e-05 1.000000000000000000e+00 9.730848000000000297e+01 9.468940279910400932e+03 9.214081858488556463e+05 3.695425434605876944e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.596725946468405937e+02 -4.790854546882667301e-14 3.729469396825285318e-03 1.677155036587289760e-02 9.729758654761985759e-04 7.744619962460313600e-04 -6.579227325079367063e-04 3.219561317460413550e-05 1.000000000000000000e+00 9.933574000000000126e+01 9.867589241347599454e+03 9.802042793053025380e+05 4.412071808656324720e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.581306961166553151e+02 -1.980567423292065087e-14 1.638672296825285576e-03 -6.475722634127104721e-03 1.390103865476201295e-03 4.816735362460312572e-04 -6.694806825079367436e-04 -9.350514682539593728e-05 1.000000000000000000e+00 1.013629999999999995e+02 1.027445776900000055e+04 1.041449862839146983e+06 5.215851519114699477e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.583217628919668982e+02 -1.405585884156201381e-14 7.728531396825284727e-03 -3.856817634127103489e-03 5.960830865476204887e-03 3.423149362460312529e-04 -7.660289725079367888e-04 2.281447317460411506e-05 1.000000000000000000e+00 1.033902599999999978e+02 1.068954586286759877e+04 1.105194926043805433e+06 6.111763525322629721e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.598821128165189407e+02 -1.691532273721650273e-14 2.477927296825284398e-03 1.116856365872893886e-03 9.179691865476201362e-03 7.097850162460313164e-04 -8.175605915079367601e-04 -5.294306825395908231e-06 1.000000000000000000e+00 1.054175199999999961e+02 1.111285352295039957e+04 1.171489458512694109e+06 7.104806786621743231e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.585404205373588979e+02 5.840602392497974451e-15 -3.963158031747146190e-04 7.451014365872893341e-03 3.865376865476201351e-03 5.380693362460314128e-04 -7.396422825079367394e-04 -2.474268682539594241e-05 1.000000000000000000e+00 1.074447800000000086e+02 1.154438074924840112e+04 1.240383449839229695e+06 8.199980262353675789e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.574534264776349914e+02 1.712000128500727594e-14 -8.327767103174715108e-03 6.492053658728944021e-04 -4.315605134523795017e-03 4.314180362460313858e-04 -5.235343725079368016e-04 -1.426233668253959388e-04 1.000000000000000000e+00 1.094720400000000069e+02 1.198412754176160161e+04 1.311926889616827713e+06 9.402282911860039167e+04 -0.000000000000000000e+00 -0.000000000000000000e+00
2.587782193488699249e+02 1.886219388254293476e-14 4.761096396825285557e-03 -7.202196341271061009e-04 -2.113392134523800481e-03 4.052769362460314270e-04 -7.262424025079366931e-04 -9.712075682539588351e-05 1.000000000000000000e+00 1.114993000000000052e+02 1.243209390049000103e+04 1.386169767438904848e+06 1.071671369448246987e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.598656445159390387e+02 2.347227720962027251e-14 -4.165797203174715496e-03 1.295209736587289717e-02 -1.783551213452379963e-02 4.884648362460312747e-04 -5.813059725079367619e-04 -7.004130682539588988e-05 1.000000000000000000e+00 1.135265600000000035e+02 1.288827982543360122e+04 1.463162072898877319e+06 1.214827156956259423e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.600989598110621728e+02 7.813072328225483567e-15 1.221070796825285756e-03 1.337387336587289588e-02 -1.252786513452380096e-02 2.161711362460314121e-04 -5.074466025079367101e-04 2.142214317460411615e-05 1.000000000000000000e+00 1.155538200000000018e+02 1.335268531659240034e+04 1.542953795590161346e+06 1.370195549644204148e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.608026132195976174e+02 -8.925257391752444914e-15 1.228668539682528649e-02 1.208959736587289502e-02 -2.235864113452379343e-02 1.684635362460312931e-04 -2.464530425079367254e-04 1.124107331746041069e-04 1.000000000000000000e+00 1.175810800000000000e+02 1.382531037396640022e+04 1.625594925106173148e+06 1.538276443446243939e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.599810088414655525e+02 -1.025966746681070654e-14 2.181112039682528425e-02 -1.205161763412710557e-02 -1.086435413452380150e-02 -5.987476375396861422e-05 -3.407551025079368036e-04 1.726038431746041530e-04 1.000000000000000000e+00 1.196083399999999983e+02 1.430615499755559904e+04 1.711135451040329412e+06 1.719569734296541719e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.590798220474434288e+02 -1.906044947566650386e-14 1.003784239682528612e-02 6.137143365872895634e-03 3.477642546547619895e-02 -2.676582637539685807e-04 -2.744146425079367797e-04 7.012074317460411776e-05 1.000000000000000000e+00 1.216356000000000108e+02 1.479521918736000225e+04 1.799625362986046588e+06 1.914575318129261141e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.594494534850605305e+02 -2.578066919736734499e-14 -2.027138103174714809e-03 -6.372505634127105523e-03 2.919624086547620290e-02 -3.534829637539685723e-04 -3.414351725079367138e-04 -9.094636825395874605e-06 1.000000000000000000e+00 1.236628600000000091e+02 1.529250294337960258e+04 1.891114650536739733e+06 2.123793090878562944e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.580147282408462956e+02 -1.888533906750968316e-14 -1.798189060317471888e-02 8.892993658728941264e-04 1.529699586547620185e-02 -1.785335637539686715e-04 -3.668640225079367609e-04 -1.523243868253959478e-04 1.000000000000000000e+00 1.256901200000000074e+02 1.579800626561440185e+04 1.985653303285826230e+06 2.347722948478611070e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.573028837927551535e+02 -1.384973992480027394e-14 -2.226030160317471474e-02 -1.401617563412710550e-02 9.232429865476204922e-03 -2.170017637539685754e-04 -6.020543625079367335e-04 -1.919957668253958593e-04 1.000000000000000000e+00 1.277173800000000057e+02 1.631172915406440188e+04 2.083291310826721834e+06 2.586864786863568006e+05 -0.000000000000000000e+00 -0.000000000000000000e+00
2.578661686497533765e+02 6.466134453156046314e-15 -2.225478460317471471e-02 -4.346986634127105592e-03 4.281016865476203193e-03 7.098093624603144143e-05 -4.939255525079367390e-04 -1.331850268253959284e-04 1.000000000000000000e+00 1.297446400000000040e+02 1.683367160872960085e+04 2.184078662752842996e+06 0.000000000000000000e+00 5.310280676409380618e+00 -0.000000000000000000e+00
2.591778558577004219e+02 3.881029512404210243e-14 -3.350587260317471061e-02 3.708508365872893731e-03 3.303729865476202898e-03 4.290136246031276898e-06 -4.810798125079367789e-04 -1.990675968253958908e-04 1.000000000000000000e+00 1.317719000000000165e+02 1.736383362961000603e+04 2.288065348657606635e+06 0.000000000000000000e+00 5.366368736595970290e+01 -0.000000000000000000e+00
2.586265557848932417e+02 2.496704000974017557e-14 -4.055766460317471178e-02 2.324536365872896526e-03 -1.314141813452379681e-02 -2.111011637539687423e-04 -4.601720925079367608e-04 -2.897881768253959302e-04 1.000000000000000000e+00 1.337991600000000005e+02 1.790221521670560105e+04 2.395301358134427108e+06 0.000000000000000000e+00 1.950272394768473418e+02 -0.000000000000000000e+00
2.590581804587237684e+02 1.020916719346166513e-14 -3.380893360317471785e-02 4.358221365872893410e-03 -1.662428913452379531e-02 -3.211422637539687076e-04 -4.006317125079367453e-04 -1.464107968253959514e-04 1.000000000000000000e+00 1.358264200000000130e+02 1.844881637001640411e+04 2.505836680776723661e+06 0.000000000000000000e+00 4.793905304253556778e+02 -0.000000000000000000e+00
2.583827214705520987e+02 2.027411219651766781e-14 -3.782695560317471395e-02 1.149229936587289197e-02 -1.630400713452379718e-02 -2.047094637539685711e-04 -2.136010125079367472e-04 -1.059907068253958815e-04 1.000000000000000000e+00 1.378536799999999971e+02 1.900363708954240064e+04 2.619721306177909020e+06 0.000000000000000000e+00 9.567431536277551913e+02 -0.000000000000000000e+00
2.588504729398947006e+02 -2.959667608385212738e-16 -2.737255860317471326e-02 2.306047836587289679e-02 -1.175693013452380059e-02 -1.525203637539687424e-04 -2.631168025079367104e-04 -9.378550682539587170e-05 1.000000000000000000e+00 1.398809400000000096e+02 1.956667737528360158e+04 2.737005223931403365e+06 0.000000000000000000e+00 1.677074702500338617e+03 -0.000000000000000000e+00
2.575437003556809259e+02 -1.510831198685233849e-14 -2.817193160317471579e-02 -8.620721634127109789e-03 -1.014567713452380060e-02 -2.024390637539686885e-04 2.442606749206328864e-05 -1.798543568253958532e-04 1.000000000000000000e+00 1.419081999999999937e+02 2.013793722723999963e+04 2.857738423630618956e+06 0.000000000000000000e+00 2.690374770459364299e+03 -0.000000000000000000e+00
2.571050917428615890e+02 -1.222166334909334567e-14 -3.965530660317471978e-02 -7.093271634127106678e-03 -2.676973013452380035e-02 -2.402326637539686175e-04 -1.294388825079367941e-04 -2.178491468253959109e-04 1.000000000000000000e+00 1.439354600000000062e+02 2.071741664541160208e+04 2.981970894868975971e+06 0.000000000000000000e+00 4.046632950921140036e+03 -0.000000000000000000e+00
2.578799640847943806e+02 -2.669000963219823434e-14 -4.578354560317471345e-02 -1.935690153412710640e-02 -1.530625134523795616e-03 -3.285852637539686972e-04 -2.997716825079367771e-04 -1.772051168253958690e-04 1.000000000000000000e+00 1.459627199999999903e+02 2.130511562979839800e+04 3.109752627239886671e+06 0.000000000000000000e+00 5.795838837301906096e+03 -0.000000000000000000e+00
2.593543512047501167e+02 -1.557425937872241460e-14 -5.462931060317471887e-02 -1.786486341271049938e-04 -2.675493513452380234e-02 -3.041632637539686251e-04 -2.994083325079367969e-04 -2.266904168253959084e-04 1.000000000000000000e+00 1.479899800000000027e+02 2.190103418040040197e+04 3.241133610336771701e+06 0.000000000000000000e+00 7.987982023017991196e+03 -0.000000000000000000e+00
2.592103613515139955e+02 1.025550116606464834e-14 -5.747345460317471177e-02 -2.301652634127106245e-03 -3.055690313452380513e-02 -1.852517637539686981e-04 -7.782878250793675776e-05 -2.941239768253959370e-04 1.000000000000000000e+00 1.500172400000000152e+02 2.250517229721760305e+04 3.376163833753045183e+06 0.000000000000000000e+00 1.067305210148565311e+04 -0.000000000000000000e+00
2.593191728453962241e+02 7.314866478049465998e-15 -4.823187060317471464e-02 1.890068236587289646e-02 -4.777992713452379470e-02 -3.452388637539688387e-04 -1.024134925079367604e-04 -3.109670268253958468e-04 1.000000000000000000e+00 1.520444999999999993e+02 2.311752998025000124e+04 3.514893287082121242e+06 0.000000000000000000e+00 1.390103866612112324e+04 -0.000000000000000000e+00
2.592008769899240974e+02 2.221644269997001270e-14 -4.118386960317471646e-02 1.733267436587289378e-02 -4.355931913452379400e-02 -3.705732637539686618e-04 -2.771284925079367436e-04 -1.953945868253958865e-04 1.000000000000000000e+00 1.540717600000000118e+02 2.373810722949760384e+04 3.657371959917420056e+06 0.000000000000000000e+00 1.772193131034077305e+04 -0.000000000000000000e+00
2.594888566963954304e+02 3.423980053733376596e-14 -3.876614060317470911e-02 1.016017036587289757e-02 -5.628503713452380486e-02 -3.304482637539686487e-04 -1.241367425079367053e-04 -9.316598682539590105e-05 1.000000000000000000e+00 1.560990199999999959e+02 2.436690404496039991e+04 3.803649841852353886e+06 0.000000000000000000e+00 2.218571962756076755e+04 -0.000000000000000000e+00
2.592471779187787320e+02 2.090355192478126067e+00 -4.206244260317471007e-02 1.105673136587289468e-02 -4.754148013452379196e-02 -2.150553637539685901e-04 -3.158812625079367815e-04 -1.838400068253959359e-04 1.000000000000000000e+00 1.581262800000000084e+02 2.500392042663840402e+04 3.953776922480343841e+06 0.000000000000000000e+00 2.734239321119751912e+04 -0.000000000000000000e+00
2.595144644726888146e+02 1.390900631135700216e+01 -4.419308660317471105e-02 2.374663636587289600e-02 -5.757486113452379983e-02 -3.322781637539685886e-04 3.979992749206327091e-05 -1.636741968253958715e-04 1.000000000000000000e+00 1.601535399999999925e+02 2.564915637453159798e+04 4.107803191394800786e+06 0.000000000000000000e+00 3.324194165466715640e+04 -0.000000000000000000e+00
2.594934264342520578e+02 1.557696618507103814e+01 -4.037264960317471507e-02 1.567967136587289367e-02 -6.731542113452379517e-02 -3.889040637539684965e-04 6.342409749206321789e-05 -1.471519668253958805e-04 1.000000000000000000e+00 1.621808000000000050e+02 2.630261188863999996e+04 4.265778638189146295e+06 0.000000000000000000e+00 3.993435455138613179e+04 -0.000000000000000000e+00
2.599376394425571561e+02 6.004799507075502696e+00 -2.857072660317471618e-02 1.227729936587289294e-02 -4.839276813452379755e-02 -4.437891637539687073e-04 9.347311749206322923e-05 -8.371388682539590391e-05 1.000000000000000000e+00 1.642080600000000175e+02 2.696428696896360634e+04 4.427753252456794493e+06 0.000000000000000000e+00 4.746962149477063213e+04 -0.000000000000000000e+00
2.583130545235978275e+02 -1.994345614804481137e+00 -3.650895860317471264e-02 6.498904365872894273e-03 -2.158240113452379594e-02 -4.707137637539686968e-04 5.781790749206320440e-05 -1.285526168253958669e-04 1.000000000000000000e+00 1.662353200000000015e+02 2.763418161550239893e+04 4.593777023791158572e+06 0.000000000000000000e+00 5.589773207823683333e+04 -0.000000000000000000e+00
2.576154366178921009e+02 -4.354781600224979066e+00 -3.754501060317471522e-02 -1.127231463412710355e-02 -2.067503813452380157e-02 -4.761822637539686598e-04 1.106139774920631693e-04 -2.297192168253959360e-04 1.000000000000000000e+00 1.682625800000000140e+02 2.831229582825640318e+04 4.763899941785659641e+06 0.000000000000000000e+00 6.526867589520123147e+04 -0.000000000000000000e+00
2.582447671201464914e+02 -3.421137938612250018e+00 -3.709204260317471025e-02 -2.815319033412710253e-02 -3.209472813452379780e-02 -4.201502637539685295e-04 1.587400974920632384e-04 -1.486038868253958603e-04 1.000000000000000000e+00 1.702898399999999981e+02 2.899862960722560092e+04 4.938171996033710428e+06 0.000000000000000000e+00 7.563244253907985694e+04 -0.000000000000000000e+00
2.578366809073544346e+02 -1.884871573445409121e+00 -4.559719660317471113e-02 -2.012774773412710425e-02 -4.258769413452380415e-02 -5.238649637539687445e-04 1.121453374920631770e-04 -3.780857468253959143e-04 1.000000000000000000e+00 1.723171000000000106e+02 2.969318295241000305e+04 5.116643176128730178e+06 0.000000000000000000e+00 8.703902160328927857e+04 -0.000000000000000000e+00
2.575530122743335824e+02 -8.333182129281194728e-01 -5.434145660317471482e-02 -3.934316634127105194e-03 -2.802218613452379936e-02 -6.544949637539688135e-04 -4.547183250793677273e-05 -4.325602468253959159e-04 1.000000000000000000e+00 1.743443599999999947e+02 3.039595586380959867e+04 5.299363471664131619e+06 0.000000000000000000e+00 9.953840268124543945e+04 -0.000000000000000000e+00
2.582481297574381642e+02 -3.138703752643597356e-01 -7.287733960317471782e-02 -5.080906634127104610e-03 -3.453186913452380158e-02 -4.803973637539688153e-04 1.780781974920633099e-04 -5.289674068253958326e-04 1.000000000000000000e+00 1.763716200000000072e+02 3.110694834142440232e+04 5.486382872233334929e+06 0.000000000000000000e+00 1.131805753663649812e+05 -0.000000000000000000e+00
2.585080012650139452e+02 -1.043136344467513188e-01 -6.379200960317471525e-02 -1.374258063412710576e-02 -2.450723213452379867e-02 -4.271225637539686863e-04 1.437427974920632524e-04 -5.507417468253958956e-04 1.000000000000000000e+00 1.783988800000000197e+02 3.182616038525440672e+04 5.677751367429755628e+06 0.000000000000000000e+00 1.280155292520640214e+05 -0.000000000000000000e+00
2.589810122439655515e+02 -3.132084140102636693e-02 -7.903580860317471757e-02 -1.139652463412710488e-02 -3.978782313452379482e-02 -7.604801637539687284e-04 7.115520749206329099e-05 -5.629854968253959323e-04 1.000000000000000000e+00 1.804261400000000037e+02 3.255359199529960097e+04 5.873518946846805513e+06 0.000000000000000000e+00 1.440932539317586052e+05 -0.000000000000000000e+00
2.592973588137387537e+02 -8.642004611292256402e-03 -7.117489360317472147e-02 6.563063658728933436e-04 -1.220494713452379559e-02 -1.040174863753968570e-03 1.255475674920631573e-04 -4.912412868253959080e-04 1.000000000000000000e+00 1.824534000000000162e+02 3.328924317156000325e+04 6.073735600077906623e+06 0.000000000000000000e+00 1.614637389988654468e+05 -0.000000000000000000e+00
2.597658000548336190e+02 -2.219631667718219379e-03 -7.383044660317471253e-02 1.410036136587289324e-02 7.094414865476204868e-03 -1.154148363753968592e-03 1.615656974920631457e-04 -4.779029468253959113e-04 1.000000000000000000e+00 1.844806600000000003e+02 3.403311391403560265e+04 6.278451316716470756e+06 0.000000000000000000e+00 1.801769740468003438e+05 -0.000000000000000000e+00
2.600563664053747743e+02 -5.360794304548768905e-04 -6.586775660317471803e-02 1.655973336587289457e-02 -3.370324134523795812e-03 -1.024209463753968704e-03 2.467476974920631986e-04 -3.545917268253958969e-04 1.000000000000000000e+00 1.865079200000000128e+02 3.478520422272640280e+04 6.487716086355919018e+06 0.000000000000000000e+00 2.002829486689801270e+05 -0.000000000000000000e+00
2.602901128078428314e+02 -1.227230843113079188e-04 -7.049731160317471157e-02 2.987536736587289438e-02 -2.844490013452380395e-02 -1.023074663753968574e-03 3.075099974920633130e-04 -4.335420468253959139e-04 1.000000000000000000e+00 1.885351799999999969e+02 3.554551409763239644e+04 6.701579898589661345e+06 0.000000000000000000e+00 2.218316524588204629e+05 -0.000000000000000000e+00
2.598264999690118202e+02 -2.680547757517946417e-05 -6.999111960317472292e-02 3.268389136587289412e-02 -1.014415313452379785e-02 -1.264280463753968825e-03 3.754828974920633141e-04 -4.390807868253959116e-04 1.000000000000000000e+00 1.905624400000000094e+02 3.631404353875360539e+04 6.920092743011121638e+06 0.000000000000000000e+00 2.448730750097382988e+05 -0.000000000000000000e+00
2.593596969358279694e+02 -5.616675076130314912e-06 -7.061410360317471602e-02 1.946565236587289444e-02 1.298353186547620067e-02 -1.411778063753968502e-03 3.464828974920631513e-04 -4.315975268253958975e-04 1.000000000000000000e+00 1.925896999999999935e+02 3.709079254609000054e+04 7.143304609213708900e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.050658692729931676e-01
2.590111897581169842e+02 -1.134042666258773269e-06 -6.430232360317471307e-02 9.387877365872897284e-03 1.989402986547620170e-02 -1.206621563753968590e-03 3.619273974920631963e-04 -2.785400868253959123e-04 1.000000000000000000e+00 1.946169600000000059e+02 3.787576111964160373e+04 7.371265486790845171e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.792219728288165825e+01
2.592187248340080146e+02 -2.214888922805985211e-07 -6.659902160317471287e-02 2.656261365872894520e-03 3.570156865476202535e-03 -1.165767363753968631e-03 2.659288974920633259e-04 -4.230125468253958989e-04 1.000000000000000000e+00 1.966442200000000184e+02 3.866894925940840767e+04 7.604025365335945040e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.001701567040806395e+02
2.568569463550700789e+02 -4.198101968002987330e-08 -7.323089060317471144e-02 -1.627500873412710686e-02 -1.577161013452380023e-02 -1.102308463753968581e-03 3.161176974920632205e-04 -5.168182368253959342e-04 1.000000000000000000e+00 1.986714800000000025e+02 3.947035696539039782e+04 7.841634234442420304e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.969385375491461332e+02
2.567240790713361207e+02 -7.743329643075380204e-09 -6.897513560317471148e-02 -1.611915993412710302e-02 -2.616736134523796331e-03 -1.200253663753968539e-03 1.929508974920633073e-04 -5.036689468253959488e-04 1.000000000000000000e+00 2.006987400000000150e+02 4.027998423758760327e+04 8.084142083703693934e+06 0.000000000000000000e+00 0.000000000000000000e+00 6.582169332343630686e+02
2.586193994029661667e+02 -1.393367178829911838e-09 -5.634666760317472156e-02 -1.460659013412710441e-02 -7.355770134523799408e-03 -1.106632863753968712e-03 2.397183974920633369e-04 -2.815068668253959110e-04 1.000000000000000000e+00 2.027259999999999991e+02 4.109783107600000221e+04 8.331598902713175863e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.233994937175999667e+03
2.583841010140561707e+02 -2.451015754665610884e-10 -5.365690960317472114e-02 -3.176442634127106535e-03 -1.357681813452379926e-02 -1.359759063753968565e-03 3.363015974920633170e-04 -3.537066968253959058e-04 1.000000000000000000e+00 2.047532600000000116e+02 4.192389748062760191e+04 8.584054681064289063e+06 0.000000000000000000e+00 0.000000000000000000e+00 2.074262142790351845e+03
2.580570629821268653e+02 -4.223306201495954454e-11 -6.084147260317471217e-02 -3.061121634127104973e-03 -7.098403134523798008e-03 -1.362605363753968592e-03 3.012735974920632180e-04 -3.800203668253959157e-04 1.000000000000000000e+00 2.067805199999999957e+02 4.275818345147039508e+04 8.841559408350443467e+06 0.000000000000000000e+00 0.000000000000000000e+00 3.229008143493673742e+03
2.579470443876787158e+02 -7.156816105084053148e-12 -5.542562460317471129e-02 -7.138105634127103749e-03 -8.081440134523797114e-03 -1.391156263753968482e-03 4.135093974920632997e-04 -3.021409368253959084e-04 1.000000000000000000e+00 2.088077800000000082e+02 4.360068898852840357e+04 9.104163074165061116e+06 0.000000000000000000e+00 0.000000000000000000e+00 4.748222532702277931e+03
2.599391914289993224e+02 -1.207054311834787104e-12 -3.929887060317471814e-02 -1.462635634127105316e-03 8.798824865476201351e-03 -1.426038263753968451e-03 4.433595974920632532e-04 -2.323506468253958533e-04 1.000000000000000000e+00 2.108350399999999922e+02 4.445141409180159826e+04 9.371915668101552874e+06 0.000000000000000000e+00 0.000000000000000000e+00 6.681894903832399905e+03
2.598000299780277942e+02 -2.147932142301532625e-13 -3.044666260317471437e-02 3.492261365872894457e-03 1.358364186547620159e-02 -1.401727663753968636e-03 6.015499974920632828e-04 -3.316270368253958971e-04 1.000000000000000000e+00 2.128623000000000047e+02 4.531035876129000098e+04 9.644867179753340781e+06 0.000000000000000000e+00 0.000000000000000000e+00 9.080014850300372927e+03
2.586574230707965967e+02 -4.005466736646009179e-14 -2.476531660317471406e-02 -5.519612634127105122e-03 -4.220371134523795420e-03 -1.356848963753968466e-03 6.767285974920632342e-04 -1.464826568253959098e-04 1.000000000000000000e+00 2.148895600000000172e+02 4.617752299699360447e+04 9.923067598713837564e+06 0.000000000000000000e+00 0.000000000000000000e+00 1.199257196552245478e+04
2.580585287471001266e+02 5.576182320169238469e-15 -2.481630060317471451e-02 -2.066747903412710641e-02 -8.855922134523797062e-03 -1.513199763753968717e-03 7.608736974920631479e-04 -2.132055668253959218e-04 1.000000000000000000e+00 2.169168200000000013e+02 4.705290679891240143e+04 1.020656691457645781e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.546955584291486957e+04
2.596834585519351322e+02 2.568277743391322110e-14 -1.720004460317471617e-02 -3.208105634127104283e-03 6.712917865476203394e-03 -1.568501263753968675e-03 8.467745974920633202e-04 -4.144829682539588698e-05 1.000000000000000000e+00 2.189440800000000138e+02 4.793651016704640642e+04 1.049541511693462171e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.956095607589399515e+04
2.602373452688122484e+02 2.864205519224055026e-14 -6.253708943174715595e-03 -9.945376634127107290e-03 4.145976865476200257e-03 -1.333169263753968686e-03 9.531898974920633176e-04 3.677603317460409731e-05 1.000000000000000000e+00 2.209713399999999979e+02 4.882833310139559762e+04 1.078966219538174197e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.431676225787599833e+04
2.597039792615581746e+02 4.243141342386595795e-14 1.595174939682528562e-02 -1.055324663412710549e-02 -1.035979134523801193e-03 -1.308523563753968659e-03 9.431188974920631492e-04 1.944633531746040077e-04 1.000000000000000000e+00 2.229986000000000104e+02 4.972837560196000413e+04 1.108935813951123878e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.978696398227728423e+04
2.587581297465923740e+02 5.218249358888024564e-14 1.167652339682528559e-02 -3.154518813412710704e-02 -1.016064713452379670e-02 -1.509701063753968696e-03 1.229371797492063208e-03 3.136779317460414856e-05 1.000000000000000000e+00 2.250258599999999944e+02 5.063663766873959685e+04 1.139455293891652301e+07 0.000000000000000000e+00 0.000000000000000000e+00 3.602155084251398512e+04
2.603926301344870922e+02 3.008952168432361289e-14 5.463445239682528098e-02 1.774224736587289714e-02 -2.389310713452379165e-02 -1.121983163753968726e-03 1.694905097492063056e-03 6.866294531746040862e-04 1.000000000000000000e+00 2.270531200000000069e+02 5.155311930173440487e+04 1.170529658319101855e+07 0.000000000000000000e+00 0.000000000000000000e+00 4.307051243200255703e+04
2.602516580326667963e+02 -6.752827934503707979e-15 4.810599839682529189e-02 -1.496451783412710429e-02 -1.839609713452379502e-02 -1.091134563753968535e-03 2.077571597492063049e-03 6.492220531746040147e-04 1.000000000000000000e+00 2.290803800000000194e+02 5.247782050094440638e+04 1.202163906192813627e+07 0.000000000000000000e+00 0.000000000000000000e+00 5.098383834415919409e+04
2.592875295662722124e+02 -5.123726758011432889e-14 5.266199139682528618e-02 -3.588364363412710478e-02 -2.446927813452379197e-02 -1.311484563753968768e-03 2.101589897492062934e-03 4.946715531746040178e-04 1.000000000000000000e+00 2.311076400000000035e+02 5.341074126636960136e+04 1.234363036472129077e+07 0.000000000000000000e+00 0.000000000000000000e+00 5.981151817240004311e+04
2.592308820611373790e+02 -4.564269168653986185e-14 6.118641339682527602e-02 -3.072892273412710512e-02 -5.481309213452380258e-02 -1.255843163753968695e-03 2.399166697492063124e-03 8.114795531746040380e-04 1.000000000000000000e+00 2.331349000000000160e+02 5.435188159801000438e+04 1.267132048116390407e+07 0.000000000000000000e+00 0.000000000000000000e+00 6.960354151014162926e+04
2.595619725021097111e+02 -7.003595564454634878e-15 7.061374139682527473e-02 -2.235936743412710384e-02 -9.913570134523798372e-03 -1.268399263753968668e-03 2.261032397492063195e-03 8.009784531746040040e-04 1.000000000000000000e+00 2.351621600000000001e+02 5.530124149586560088e+04 1.300475940084938519e+07 0.000000000000000000e+00 0.000000000000000000e+00 8.040989795079996111e+04
2.590598186666346123e+02 6.860929725324630356e-16 7.201945939682527498e-02 -3.425773313412710380e-02 -2.546723413452380014e-02 -1.075733563753968579e-03 2.168366397492063266e-03 7.489506531746041021e-04 1.000000000000000000e+00 2.371894200000000126e+02 5.625882095993640542e+04 1.334399711337115988e+07 0.000000000000000000e+00 0.000000000000000000e+00 9.228057708779163659e+04
2.587420925533573950e+02 1.294219066619279127e-14 7.647773339682528704e-02 -3.709144563412710566e-02 -2.783978113452380276e-02 -1.551370363753968619e-03 2.241157097492063165e-03 8.262895531746041215e-04 1.000000000000000000e+00 2.392166799999999967e+02 5.722461999022239615e+04 1.368908360832263529e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.052655685145325697e+05
2.593739234782133281e+02 2.489479921874677366e-14 8.438259339682528670e-02 -4.554863663412710151e-02 -2.931516913452379691e-02 -1.568884063753968675e-03 2.324322897492063066e-03 1.015189053174604152e-03 1.000000000000000000e+00 2.412439400000000091e+02 5.819863858672360220e+04 1.404006887529723532e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.194148618244394165e+05
2.595694737699126904e+02 1.555171091878242343e-14 1.248369423968252873e-01 -2.113519634127106195e-03 -3.012072513452379585e-02 -1.548244163753968554e-03 1.854907397492062959e-03 1.469042453174604010e-03 1.000000000000000000e+00 2.432712000000000216e+02 5.918087674944000901e+04 1.439700290388837270e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.347784466109283094e+05
2.589335042145449961e+02 2.528064545633945617e-14 1.386098423968253057e-01 -1.554300763412710430e-02 -4.327043134523797518e-03 -1.380193763753968534e-03 1.668424897492063255e-03 1.876337353174604202e-03 1.000000000000000000e+00 2.452984600000000057e+02 6.017133447837160202e+04 1.475993568368945830e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.514063124674152350e+05
2.585203309350814038e+02 1.708358532944420453e-14 1.239822423968252735e-01 -5.095475363412710346e-02 2.847714586547619997e-02 -1.985254663753968554e-03 1.673778397492063034e-03 1.607149453174604170e-03 1.000000000000000000e+00 2.473257200000000182e+02 6.117001177351841034e+04 1.512891720429391786e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.693484489873169805e+05
2.577332151445505701e+02 -1.306066815792015049e-14 1.361771723968253078e-01 -4.787148863412710176e-02 4.220229221547620174e-02 -1.763331063753968610e-03 1.475171397492063084e-03 1.748681753174604041e-03 1.000000000000000000e+00 2.493529800000000023e+02 6.217690863488039759e+04 1.550399745529516041e+07 0.000000000000000000e+00 0.000000000000000000e+00 1.886548457640492998e+05
2.560893166165346315e+02 -2.617243662451785409e-14 1.327212623968253014e-01 -5.556048863412710315e-02 3.156961486547620044e-02 -1.981807663753968815e-03 1.528626197492063011e-03 1.673368953174604080e-03 1.000000000000000000e+00 2.513802400000000148e+02 6.319202506245760742e+04 1.588522642628660984e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.093754923910290236e+05
2.583923782750806595e+02 -3.599677893875522221e-14 1.378797223968253050e-01 -4.097437163412710748e-02 4.345609296547620071e-02 -1.820374863753968561e-03 1.497760797492063162e-03 1.892659753174604376e-03 1.000000000000000000e+00 2.534074999999999989e+02 6.421536105624999618e+04 1.627265410686167143e+07 0.000000000000000000e+00 0.000000000000000000e+00 2.315603784616718476e+05
'''
| 146.118252
| 382
| 0.822572
| 7,339
| 56,840
| 6.362583
| 0.250307
| 0.185737
| 0.135218
| 0.068359
| 0.277546
| 0.177728
| 0.16762
| 0.15128
| 0.094678
| 0.092066
| 0
| 0.788127
| 0.075387
| 56,840
| 388
| 383
| 146.494845
| 0.100371
| 0.010521
| 0
| 0.00939
| 0
| 0.605634
| 0.944886
| 0.893743
| 0
| 0
| 0
| 0
| 0.140845
| 1
| 0.004695
| false
| 0
| 0.028169
| 0
| 0.032864
| 0.028169
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
670069006b45cb268edbcf498510bbda5164ad6d
| 6,628
|
py
|
Python
|
climpred/tests/test_alignment.py
|
raybellwaves/climpred
|
4ce5e3d30dbaa98fb974b54d82a5403c424a79db
|
[
"MIT"
] | 104
|
2020-09-17T16:46:37.000Z
|
2022-03-29T16:49:44.000Z
|
climpred/tests/test_alignment.py
|
raybellwaves/climpred
|
4ce5e3d30dbaa98fb974b54d82a5403c424a79db
|
[
"MIT"
] | 303
|
2020-09-17T16:05:24.000Z
|
2022-03-28T19:59:31.000Z
|
climpred/tests/test_alignment.py
|
kpegion/climpred
|
b3562311af253b9ee0e0cd97d196b0fd34936031
|
[
"MIT"
] | 18
|
2020-10-08T15:40:42.000Z
|
2022-03-29T19:07:54.000Z
|
import logging
import numpy as np
import pytest
import xskillscore as xs
from climpred.exceptions import CoordinateError
from climpred.prediction import compute_hindcast
def test_same_inits_initializations(
    hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
    """Every lead should verify against the identical set of inits when the
    `same_inits` alignment is requested."""
    with caplog.at_level(logging.INFO):
        compute_hindcast(
            hind_ds_initialized_1d_cftime,
            reconstruction_ds_1d_cftime,
            alignment="same_inits",
        )
        # The first two log records are metadata; each later record reports one lead.
        for record in caplog.record_tuples[2:]:
            print(record)
            assert "inits: 1954-01-01 00:00:00-2007-01-01 00:00:00" in record[2]
def test_same_inits_verification_dates(
    hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
    """Each lead should verify against dates shifted forward by the lead
    offset when the `same_inits` alignment is requested."""
    with caplog.at_level(logging.INFO):
        FIRST_INIT, LAST_INIT = 1954, 2007
        compute_hindcast(
            hind_ds_initialized_1d_cftime,
            reconstruction_ds_1d_cftime,
            alignment="same_inits",
        )
        nleads = hind_ds_initialized_1d_cftime["lead"].size
        # Skip the two metadata records; one record per lead follows.
        for i, record in enumerate(caplog.record_tuples[2 : nleads + 2], start=2):
            print(record)
            expected = (
                f"verifs: {FIRST_INIT + i}-01-01 00:00:00-{LAST_INIT + i}-01-01"
            )
            assert expected in record[2]
@pytest.mark.parametrize("alignment", ["same_inits", "same_verifs"])
def test_disjoint_verif_time(small_initialized_da, small_verif_da, alignment):
    """Alignment should cope with gaps in the verification time axis, i.e.,
    non-continuous time sampling to verify against."""
    forecast = small_initialized_da
    observations = small_verif_da.drop_sel(time=1992)
    actual = compute_hindcast(
        forecast, observations, alignment=alignment, metric="mse"
    )
    assert actual.notnull().all()
    # hindcast inits: [1990, 1991, 1992, 1993]
    # verif times:    [1990, 1991, 1993, 1994]
    aligned_fcst = forecast.sel(init=[1990, 1992, 1993]).rename({"init": "time"})
    aligned_obs = observations.sel(time=[1991, 1993, 1994])
    aligned_fcst["time"] = aligned_obs["time"]
    expected = xs.mse(aligned_fcst, aligned_obs, "time")
    assert actual == expected
@pytest.mark.parametrize("alignment", ["same_inits", "same_verifs"])
def test_disjoint_inits(small_initialized_da, small_verif_da, alignment):
    """Alignment should cope with gaps in the initialization axis, i.e.,
    non-continuous initializing to verify with."""
    forecast = small_initialized_da.drop_sel(init=1991)
    observations = small_verif_da
    actual = compute_hindcast(
        forecast, observations, alignment=alignment, metric="mse"
    )
    assert actual.notnull().all()
    # hindcast inits: [1990, 1992, 1993]
    # verif times:    [1990, 1991, 1992, 1993, 1994]
    aligned_fcst = forecast.rename({"init": "time"})
    aligned_obs = observations.sel(time=[1991, 1993, 1994])
    aligned_fcst["time"] = aligned_obs["time"]
    expected = xs.mse(aligned_fcst, aligned_obs, "time")
    assert actual == expected
def test_same_verifs_verification_dates(
    hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
    """Every lead should use the identical set of verification dates when the
    `same_verifs` alignment is requested."""
    with caplog.at_level(logging.INFO):
        compute_hindcast(
            hind_ds_initialized_1d_cftime,
            reconstruction_ds_1d_cftime,
            alignment="same_verifs",
        )
        # The first two log records are metadata; each later record reports one lead.
        for record in caplog.record_tuples[2:]:
            print(record)
            assert "verifs: 1964-01-01 00:00:00-2017-01-01 00:00:00" in record[2]
def test_same_verifs_initializations(
    hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
    """Each lead should draw on inits shifted back by the lead offset when the
    `same_verifs` alignment is requested."""
    with caplog.at_level(logging.INFO):
        FIRST_INIT, LAST_INIT = 1964, 2017
        compute_hindcast(
            hind_ds_initialized_1d_cftime,
            reconstruction_ds_1d_cftime,
            alignment="same_verifs",
        )
        nleads = hind_ds_initialized_1d_cftime["lead"].size
        # Skip the two metadata records; one record per lead follows.
        for i, record in enumerate(caplog.record_tuples[2 : nleads + 2], start=2):
            print(record)
            expected = (
                f"inits: {FIRST_INIT - i}-01-01 00:00:00-{LAST_INIT - i}-01-01 00:00:00"
            )
            assert expected in record[2]
def test_same_verifs_raises_error_when_not_possible(
    hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime
):
    """A CoordinateError should be raised when no common set of verification
    dates exists for the supplied initializations."""
    subset = hind_ds_initialized_1d_cftime.isel(lead=slice(0, 3), init=[1, 3, 5, 7, 9])
    with pytest.raises(CoordinateError):
        compute_hindcast(subset, reconstruction_ds_1d_cftime, alignment="same_verifs")
def test_maximize_alignment_inits(
    hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
    """The `maximize` alignment should drop the latest init one step per lead."""
    with caplog.at_level(logging.INFO):
        compute_hindcast(
            hind_ds_initialized_1d_cftime,
            reconstruction_ds_1d_cftime,
            alignment="maximize",
        )
        # The first two records are metadata; pair the remaining records
        # with their corresponding lead values.
        for lead, record in zip(
            hind_ds_initialized_1d_cftime.lead.values,
            caplog.record_tuples[2:],
        ):
            if lead >= 1:
                print(record)
                assert (
                    f"inits: 1954-01-01 00:00:00-{2016-lead}-01-01 00:00:00"
                    in record[2]
                )
def test_maximize_alignment_verifs(
    hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
    """The `maximize` alignment should drop the earliest verif one step per lead."""
    with caplog.at_level(logging.INFO):
        compute_hindcast(
            hind_ds_initialized_1d_cftime,
            reconstruction_ds_1d_cftime,
            alignment="maximize",
        )
        # The first two records are metadata; pair the remaining records
        # with their corresponding lead values.
        for lead, record in zip(
            hind_ds_initialized_1d_cftime.lead.values,
            caplog.record_tuples[2:],
        ):
            if lead >= 1:
                print(record)
                assert (
                    f"verifs: {1955+lead}-01-01 00:00:00-2017-01-01 00:00:00"
                    in record[2]
                )
| 37.027933
| 88
| 0.638956
| 854
| 6,628
| 4.716628
| 0.163934
| 0.063555
| 0.075968
| 0.084906
| 0.81281
| 0.802383
| 0.789225
| 0.756455
| 0.73858
| 0.726415
| 0
| 0.066912
| 0.262674
| 6,628
| 178
| 89
| 37.235955
| 0.757315
| 0.171092
| 0
| 0.688406
| 0
| 0.043478
| 0.092015
| 0.014568
| 0
| 0
| 0
| 0
| 0.072464
| 1
| 0.065217
| false
| 0
| 0.043478
| 0
| 0.108696
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
672669c234855969496e3d70836e376ca328b213
| 12,033
|
py
|
Python
|
neuvol/individs/initialization_network.py
|
Qwinpin/Neural_evolution
|
f8ebc8a171a6954718aa25f461e9b93afd698b8a
|
[
"Apache-2.0"
] | 6
|
2019-06-28T10:02:02.000Z
|
2021-03-02T10:03:48.000Z
|
neuvol/individs/initialization_network.py
|
Qwinpin/Neural_evolution
|
f8ebc8a171a6954718aa25f461e9b93afd698b8a
|
[
"Apache-2.0"
] | 13
|
2018-11-27T13:45:33.000Z
|
2019-10-04T11:22:32.000Z
|
neuvol/individs/initialization_network.py
|
Qwinpin/Neuvol
|
f8ebc8a171a6954718aa25f461e9b93afd698b8a
|
[
"Apache-2.0"
] | 2
|
2018-09-26T12:07:18.000Z
|
2018-09-26T12:09:46.000Z
|
import torch
import numpy as np
class Network(torch.nn.Module):
def __init__(self, structure):
super(Network, self).__init__()
self.structure = structure
self.layers_pool_inited = self.init_layers(self.structure)
def init_layers(self, structure):
# pool of layers, which should be initialised and connected
layers_pool = [0]
# pool of initialised layers
layers_pool_inited = {}
# pool of broken (invalid) layers) such as inconsistent number of dimensions
layers_pool_removed = []
while layers_pool:
# take first layer in a pool
layer_index = layers_pool[0]
# find all connections before this layer
enter_layers = set(np.where(self.structure.matrix[:, layer_index] == 1)[0])
# check if some of previous layers were not initialized
# that means - we should initialise them first
not_inited_layers = [i for i in enter_layers if i not in (layers_pool_inited.keys())]
not_inited_layers_selected = [layer for layer in not_inited_layers if layer not in layers_pool_removed]
if not_inited_layers_selected:
# remove layers, which are in pool already
# this is possible due to complex connections with different orders
not_inited_layers_selected = [layer for layer in not_inited_layers_selected if layer not in layers_pool]
# add not initialised layers to the pool
layers_pool.extend(not_inited_layers_selected)
# current layer should be shift to the end of the queue
acc = layers_pool.pop(0)
layers_pool.append(acc)
continue
# take Layer instance of the previous layers
input_layers = [self.structure.layers_index_reverse[layer] for layer in enter_layers]
# layer without rank is broken and we ignore that
input_layers = [layer for layer in input_layers if layer.config.get('rank', False)]
enter_layers = [i for i in enter_layers if i not in layers_pool_removed]
# if curent layer is the Input - initialise without any input connections
if not input_layers and self.structure.layers_index_reverse[layer_index].layer_type == 'input':
inited_layer = (None, None, self.structure.layers_index_reverse[layer_index].init_layer(None))
# detect hanging node - some of mutations could remove connection to the layer
elif not input_layers:
layers_pool_removed.append(layers_pool.pop(0))
continue
# if there are multiple input connections
elif len(input_layers) > 1:
# this case does not require additional processing - all logic is inside Layer instance,
# which handles multiple connections
inited_layer = self.structure.layers_index_reverse[layer_index]([None for _ in range(len(input_layers))], input_layers)
else:
input_layers_inited = [layers_pool_inited[layer] for layer in enter_layers][0]
inited_layer = self.structure.layers_index_reverse[layer_index](None, input_layers[0])
# add new initialised layer
layers_pool_inited[layer_index] = inited_layer
setattr(self, 'layer_{}'.format(layer_index), inited_layer[2])
# find outgoing connections and add them to the pool
output_layers = [layer for layer in np.where(self.structure.matrix[layer_index] == 1)[0]
if layer not in layers_pool and layer not in layers_pool_inited.keys()]
layers_pool.extend(output_layers)
# remove current layer from the pool
layers_pool.pop(layers_pool.index(layer_index))
self.layers_pool_removed = layers_pool_removed
return layers_pool_inited
def forward(self, x):
    """Run a forward pass through the graph of initialised layers.

    Starts from layer 0 and walks the connection matrix
    (``self.structure.matrix``), feeding each layer the cached outputs of
    its predecessors. Per-layer outputs are stored in ``buffer_x`` keyed by
    layer index, with the raw network input under the sentinel key ``-1``.

    Parameters
    ----------
    x : the input fed to the network's input layer.

    Returns
    -------
    The output of the last layer that was evaluated, or ``None`` when no
    layer produced a result.

    Raises
    ------
    RuntimeError
        If a layer marked as ``input`` has incoming connections recorded in
        ``self.layers_pool_inited`` (inconsistent graph structure).
    """
    # queue of layer indices still to be evaluated
    layers_pool = [0]
    # per-layer outputs; -1 holds the raw network input
    buffer_x = {-1: x}
    last_value = None
    while layers_pool:
        # always work on the head of the queue
        layer_index = layers_pool[0]
        # incoming connections, excluding layers dropped during initialisation
        enter_layers = set(np.where(self.structure.matrix[:, layer_index] == 1)[0])
        enter_layers = [i for i in enter_layers if i not in self.layers_pool_removed]
        # predecessors whose outputs are not cached yet must be evaluated first
        not_inited_layers = [i for i in enter_layers if i not in buffer_x.keys()]
        not_inited_layers_selected = [layer for layer in not_inited_layers
                                      if layer not in self.layers_pool_removed]
        if not_inited_layers_selected:
            # drop predecessors that are already queued
            # (possible with complex connections in different orders)
            not_inited_layers_selected = [layer for layer in not_inited_layers_selected
                                          if layer not in layers_pool]
            layers_pool.extend(not_inited_layers_selected)
            # rotate the current layer to the back of the queue
            layers_pool.append(layers_pool.pop(0))
            continue
        # gather the cached outputs of all predecessors
        temp_x = [buffer_x[layer] for layer in enter_layers]
        if not enter_layers and self.structure.layers_index_reverse[layer_index].layer_type == 'input':
            # input layer: fed directly from the raw network input
            if self.layers_pool_inited[layer_index][0] is not None:
                # BUGFIX: the original raised a plain string, which is itself
                # a TypeError in Python 3; raise a real exception instead.
                raise RuntimeError("Input layer is not the first one. Incorrect graph structure")
            if self.layers_pool_inited[layer_index][1] is not None:
                reshaper = self.layers_pool_inited[layer_index][1]
                temp_x = reshaper(buffer_x[-1])
            else:
                temp_x = buffer_x[-1]
            result_x = self.process_layer_output(
                self.layers_pool_inited[layer_index][2](temp_x),
                self.structure.layers_index_reverse[layer_index].layer_type)
            buffer_x[layer_index] = result_x
        # detect hanging node - some of mutations could remove connection to the layer
        elif not enter_layers:
            # BUGFIX: the original `continue` left the layer at the head of
            # the queue forever (infinite loop); discard it before moving on.
            layers_pool.pop(0)
            continue
        # if there are multiple input connections
        elif len(enter_layers) > 1:
            if self.layers_pool_inited[layer_index][0] is not None:
                # optional per-input reshapers plus the concat axis
                reshapers = self.layers_pool_inited[layer_index][0][0]
                axis = self.layers_pool_inited[layer_index][0][1]
                if reshapers is not None:
                    reshapers = [i.init_layer(None) for i in reshapers]
                    temp_x = [r(temp_x[i]) for i, r in enumerate(reshapers)]
                temp_x = torch.cat(temp_x, axis)
            if self.layers_pool_inited[layer_index][1] is not None:
                temp_x = self.layers_pool_inited[layer_index][1](temp_x)
            result_x = self.process_layer_output(
                self.layers_pool_inited[layer_index][2](temp_x),
                self.structure.layers_index_reverse[layer_index].layer_type)
            buffer_x[layer_index] = result_x
        else:
            # single input connection
            temp_x = temp_x[0]
            if self.layers_pool_inited[layer_index][1] is not None:
                reshaper = self.layers_pool_inited[layer_index][1]
                temp_x = reshaper(temp_x)
            result_x = self.process_layer_output(
                self.layers_pool_inited[layer_index][2](temp_x),
                self.structure.layers_index_reverse[layer_index].layer_type)
            buffer_x[layer_index] = result_x
        # queue successors that are neither queued nor already computed
        output_layers = [layer for layer in np.where(self.structure.matrix[layer_index] == 1)[0]
                         if layer not in layers_pool and layer not in buffer_x.keys()]
        last_value = result_x
        layers_pool.extend(output_layers)
        # remove the current layer from the pool
        layers_pool.pop(layers_pool.index(layer_index))
    return last_value
def process_layer_output(self, x, layer_type):
    """Strip auxiliary outputs from a layer's raw result.

    Some layer types (here: ``'lstm'``) return a tuple of results of which
    only the first element is needed downstream; every other layer type is
    passed through unchanged.
    """
    return x[0] if layer_type == 'lstm' else x
def recalculate_shapes(structure):
    """Re-derive per-layer shape information without initialising weights.

    Walks the connection matrix (``structure.matrix``) starting from layer 0
    and calls each layer with ``init=False`` so shapes are recalculated only.
    Layers whose every predecessor lacks a ``'rank'`` config entry (and that
    are not the input layer) are treated as broken/hanging and skipped.

    Parameters
    ----------
    structure : graph-structure object exposing ``matrix`` (adjacency) and
        ``layers_index_reverse`` (index -> Layer instance).

    Returns
    -------
    dict
        Mapping of layer index to the recalculated layer tuple
        (previously this result was computed and silently discarded).
    """
    # pool of layers which should be processed, seeded with the input layer
    layers_pool = [0]
    # pool of already processed layers
    layers_pool_inited = {}
    # pool of broken (invalid) layers, e.g. inconsistent number of dimensions
    layers_pool_removed = []
    while layers_pool:
        # take the first layer in the pool
        layer_index = layers_pool[0]
        # find all incoming connections of this layer
        enter_layers = set(np.where(structure.matrix[:, layer_index] == 1)[0])
        # predecessors not processed yet must be handled first
        not_inited_layers = [i for i in enter_layers if i not in layers_pool_inited.keys()]
        not_inited_layers_selected = [layer for layer in not_inited_layers
                                      if layer not in layers_pool_removed]
        if not_inited_layers_selected:
            # skip predecessors that are already queued
            # (possible with complex connections in different orders)
            not_inited_layers_selected = [layer for layer in not_inited_layers_selected
                                          if layer not in layers_pool]
            layers_pool.extend(not_inited_layers_selected)
            # rotate the current layer to the back of the queue
            layers_pool.append(layers_pool.pop(0))
            continue
        # take Layer instances of the predecessors
        input_layers = [structure.layers_index_reverse[layer] for layer in enter_layers]
        # a layer without a rank is broken and is ignored
        input_layers = [layer for layer in input_layers if layer.config.get('rank', False)]
        enter_layers = [i for i in enter_layers if i not in layers_pool_removed]
        if not input_layers and structure.layers_index_reverse[layer_index].layer_type == 'input':
            # input layer: nothing to recalculate
            inited_layer = (None, None, None)
        elif not input_layers:
            # hanging node - a mutation removed every connection to this layer
            layers_pool_removed.append(layers_pool.pop(0))
            continue
        elif len(input_layers) > 1:
            # multiple inputs are handled inside the Layer instance itself
            inited_layer = structure.layers_index_reverse[layer_index](
                [None for _ in range(len(input_layers))], input_layers, init=False)
        else:
            inited_layer = structure.layers_index_reverse[layer_index](
                None, input_layers[0], init=False)
        # record the recalculated layer
        layers_pool_inited[layer_index] = inited_layer
        # queue successors that are neither queued nor already processed
        output_layers = [layer for layer in np.where(structure.matrix[layer_index] == 1)[0]
                         if layer not in layers_pool and layer not in layers_pool_inited.keys()]
        layers_pool.extend(output_layers)
        # remove the current layer from the pool
        layers_pool.pop(layers_pool.index(layer_index))
    return layers_pool_inited
| 46.639535
| 162
| 0.639491
| 1,579
| 12,033
| 4.645345
| 0.098163
| 0.09816
| 0.054533
| 0.048671
| 0.890661
| 0.888207
| 0.877982
| 0.859714
| 0.853715
| 0.847035
| 0
| 0.006032
| 0.297349
| 12,033
| 257
| 163
| 46.821012
| 0.861502
| 0.242832
| 0
| 0.597015
| 0
| 0
| 0.010411
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037313
| false
| 0
| 0.014925
| 0
| 0.089552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
677e440453d9f9ce5d3a0e9f426abf7cd35c3066
| 191
|
py
|
Python
|
platform/hwconf_data/efm32pg12b/modules/PIN/PIN_Snippets.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | null | null | null |
platform/hwconf_data/efm32pg12b/modules/PIN/PIN_Snippets.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T02:36:22.000Z
|
2020-08-25T02:36:22.000Z
|
platform/hwconf_data/efm32pg12b/modules/PIN/PIN_Snippets.py
|
lenloe1/v2.7
|
9ac9c4a7bb37987af382c80647f42d84db5f2e1d
|
[
"Zlib"
] | 1
|
2020-08-25T01:56:04.000Z
|
2020-08-25T01:56:04.000Z
|
"""
Generated from a template
"""
import efm32pg12b.PythonSnippet.RuntimeModel as RuntimeModel
from efm32pg12b.modules.PIN.PIN_Defs import PORT_PINS
def activate_runtime():
    """No-op runtime hook; this generated module requires no runtime setup."""
    pass
| 11.235294
| 60
| 0.769634
| 23
| 191
| 6.26087
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049689
| 0.157068
| 191
| 16
| 61
| 11.9375
| 0.844721
| 0.13089
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
67c23c3fe298c711154bfea1ebfc6c851d164708
| 67
|
py
|
Python
|
example/tests/test.py
|
cristinasewell/pyenvbuilder
|
610674473c7d0f3b1733231f624ba9a9f7a9f908
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-01-08T19:44:59.000Z
|
2021-01-08T19:44:59.000Z
|
example/tests/test.py
|
cristinasewell/pyenvbuilder
|
610674473c7d0f3b1733231f624ba9a9f7a9f908
|
[
"BSD-3-Clause-LBNL"
] | 19
|
2020-04-02T18:37:02.000Z
|
2021-05-27T18:04:53.000Z
|
example/tests/test.py
|
cristinasewell/pyenvbuilder
|
610674473c7d0f3b1733231f624ba9a9f7a9f908
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2020-04-02T17:37:38.000Z
|
2020-12-13T00:02:40.000Z
|
import platform

# Report the running interpreter's version.
# BUGFIX: the original used Python 2 print-statement syntax
# (`print 'Python version', ...`), a SyntaxError on Python 3;
# the print() function emits the same text on both output formats.
print('Python version', platform.python_version())
| 16.75
| 49
| 0.80597
| 8
| 67
| 6.625
| 0.625
| 0.490566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 67
| 3
| 50
| 22.333333
| 0.883333
| 0
| 0
| 0
| 0
| 0
| 0.208955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
67f78d2d25e83f23ad38ea3e2472e785cf44ff44
| 4,225
|
py
|
Python
|
src/test/scripting/test_assignment.py
|
vincent-lg/talismud
|
645bdae3d2e71cde51a25fe48c8f1bde15319631
|
[
"BSD-3-Clause"
] | 4
|
2020-05-16T21:58:55.000Z
|
2020-08-29T11:17:31.000Z
|
src/test/scripting/test_assignment.py
|
vincent-lg/talismud
|
645bdae3d2e71cde51a25fe48c8f1bde15319631
|
[
"BSD-3-Clause"
] | 1
|
2020-12-15T11:22:32.000Z
|
2020-12-15T11:22:32.000Z
|
src/test/scripting/test_assignment.py
|
vincent-lg/talismud
|
645bdae3d2e71cde51a25fe48c8f1bde15319631
|
[
"BSD-3-Clause"
] | null | null | null |
"""Assigning values in different tests."""
from test.scripting.abc import ScriptingTest
class TestAssignment(ScriptingTest):
    """Test to assign values.

    Each test writes a small snippet in the project's scripting language via
    write_script() and reads a resulting variable back with
    get_variable_or_attribute(); both helpers are presumably provided by
    ScriptingTest (defined elsewhere) -- confirm in test.scripting.abc.
    """

    def test_int(self):
        """Create a variable with a simple integer value."""
        script = self.write_script("variable = 5")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, 5)

    def test_float(self):
        """Create a variable with a simple float value."""
        script = self.write_script("variable = 2.5")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, 2.5)

    def test_var(self):
        """Create a copy variable."""
        script = self.write_script("""
variable1 = 12
variable2 = variable1
""")
        variable2 = script.get_variable_or_attribute("variable2")
        self.assertEqual(variable2, 12)

    def test_str_apostrophes(self):
        """Create a variable with a simple string surrounded by apostrophes."""
        script = self.write_script("variable = 'ok'")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, "ok")

    def test_str_double_quotes(self):
        """Create a variable with a string surrounded by double quotes."""
        script = self.write_script("variable = \"thanks\"")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, "thanks")

    def test_str_mul_ded(self):
        """Create a variable with a multiline string using ""> <""."""
        # ""> ... <"" is the scripting language's folded multiline string:
        # per the expected value below, lines are joined with single spaces.
        script = self.write_script("""
variable = "">
This is a string
with at least
three lines.
<""
""")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable,
                "This is a string with at least three lines.")

    def test_str_mul_pre(self):
        """Create a variable with a multiline string using ""| |""."""
        # ""| ... |"" is the preserving multiline form: per the expected
        # value below, the original line breaks are kept as "\n".
        script = self.write_script("""
variable = ""|
This is a string
with at least
three lines.
|""
""")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable,
                "This is a string\nwith at least\nthree lines.")

    def test_negative_int(self):
        """Create a variable with a simple integer value."""
        script = self.write_script("variable = -5")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, -5)

    def test_negative_float(self):
        """Create a variable with a simple float value."""
        script = self.write_script("variable = -2.5")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, -2.5)

    def test_negative_var(self):
        """Create a copy variable, the second is negative."""
        script = self.write_script("""
variable1 = 12
variable2 = -variable1
""")
        variable2 = script.get_variable_or_attribute("variable2")
        self.assertEqual(variable2, -12)

    def test_add(self):
        """Affect a variable to an addition."""
        script = self.write_script("variable = 2 + 8")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, 10)

    def test_sub(self):
        """Affect a variable to a subtraction."""
        script = self.write_script("variable = 2 - 8")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, -6)

    def test_mul(self):
        """Affect a variable to a multiplication."""
        script = self.write_script("variable = 2 * 8")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, 16)

    def test_div(self):
        """Affect a variable to a division."""
        # division of two integers yields a float result (0.25 below)
        script = self.write_script("variable = 2 / 8")
        variable = script.get_variable_or_attribute("variable")
        self.assertEqual(variable, 0.25)
| 37.723214
| 79
| 0.60142
| 471
| 4,225
| 5.225053
| 0.167728
| 0.039821
| 0.085331
| 0.119464
| 0.826493
| 0.794393
| 0.736692
| 0.724502
| 0.724502
| 0.724502
| 0
| 0.01591
| 0.285917
| 4,225
| 111
| 80
| 38.063063
| 0.799801
| 0.165207
| 0
| 0.410256
| 0
| 0
| 0.250725
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 1
| 0.179487
| false
| 0
| 0.012821
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67fa82922158b5bd9625df6a883f524686dcfffe
| 131
|
py
|
Python
|
source/miniworldmaker/connectors/physics_connector.py
|
zormit/miniworldmaker
|
8003aece905b0cffec9850af3805b03372f3dc97
|
[
"MIT"
] | 9
|
2019-04-16T13:45:02.000Z
|
2022-02-23T08:46:57.000Z
|
source/miniworldmaker/connectors/physics_connector.py
|
zormit/miniworldmaker
|
8003aece905b0cffec9850af3805b03372f3dc97
|
[
"MIT"
] | 11
|
2019-08-08T11:31:50.000Z
|
2022-02-14T19:53:17.000Z
|
source/miniworldmaker/connectors/physics_connector.py
|
zormit/miniworldmaker
|
8003aece905b0cffec9850af3805b03372f3dc97
|
[
"MIT"
] | 3
|
2019-04-18T22:43:53.000Z
|
2020-04-29T13:46:08.000Z
|
from miniworldmaker.connectors import pixel_connector
class PhysicsBoardConnector(pixel_connector.PixelBoardConnector):
    """Connector for physics boards.

    Currently inherits all behaviour unchanged from PixelBoardConnector;
    this subclass exists as a named extension point.
    """
    pass
| 21.833333
| 65
| 0.862595
| 12
| 131
| 9.25
| 0.833333
| 0.252252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099237
| 131
| 5
| 66
| 26.2
| 0.940678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
db21c453bbb6f1fcdf0c55c7c430135a1c0e7113
| 36
|
py
|
Python
|
isonet/models/__init__.py
|
bpicnbnk/ResNeXt.pytorch
|
53bae425c20a5b8ec69b3441ec12cfdc7d7231a6
|
[
"MIT"
] | null | null | null |
isonet/models/__init__.py
|
bpicnbnk/ResNeXt.pytorch
|
53bae425c20a5b8ec69b3441ec12cfdc7d7231a6
|
[
"MIT"
] | null | null | null |
isonet/models/__init__.py
|
bpicnbnk/ResNeXt.pytorch
|
53bae425c20a5b8ec69b3441ec12cfdc7d7231a6
|
[
"MIT"
] | null | null | null |
from isonet.models.isonext import *
| 18
| 35
| 0.805556
| 5
| 36
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e1d482de3eaf979c2145065266a18d547aae037c
| 88
|
py
|
Python
|
src/conversation_engine/__init__.py
|
tue-robotics/conversation_engine
|
7cc5a734e57fbea9afdf1618358e777677b8df06
|
[
"BSD-2-Clause"
] | 1
|
2019-04-12T13:02:01.000Z
|
2019-04-12T13:02:01.000Z
|
src/conversation_engine/__init__.py
|
tue-robotics/conversation_engine
|
7cc5a734e57fbea9afdf1618358e777677b8df06
|
[
"BSD-2-Clause"
] | 17
|
2018-05-01T12:52:18.000Z
|
2019-08-12T13:29:47.000Z
|
src/conversation_engine/__init__.py
|
tue-robotics/conversation_engine
|
7cc5a734e57fbea9afdf1618358e777677b8df06
|
[
"BSD-2-Clause"
] | null | null | null |
from .engine import ConversationEngine, ConversationEngineUsingTopic, ConversationState
| 44
| 87
| 0.897727
| 6
| 88
| 13.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 88
| 1
| 88
| 88
| 0.963415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fbf432a5aed3a3402630516587a2c8352a185d5c
| 137
|
py
|
Python
|
src/app/api/crud/__init__.py
|
pyronear/pyro-api
|
9244b79b29bd5171ea1d02ed3c9cbb3eea75b60d
|
[
"Apache-2.0"
] | 8
|
2020-11-13T14:21:34.000Z
|
2022-03-11T18:34:54.000Z
|
src/app/api/crud/__init__.py
|
pyronear/pyro-api
|
9244b79b29bd5171ea1d02ed3c9cbb3eea75b60d
|
[
"Apache-2.0"
] | 169
|
2020-11-11T15:47:07.000Z
|
2022-02-17T23:10:34.000Z
|
src/app/api/crud/__init__.py
|
pyronear/pyro-api
|
9244b79b29bd5171ea1d02ed3c9cbb3eea75b60d
|
[
"Apache-2.0"
] | 2
|
2021-02-15T10:41:48.000Z
|
2021-11-06T01:02:09.000Z
|
from .base import *
from . import accesses
from . import alerts
from . import authorizations
from . import groups
from . import webhooks
| 19.571429
| 28
| 0.773723
| 18
| 137
| 5.888889
| 0.444444
| 0.471698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175182
| 137
| 6
| 29
| 22.833333
| 0.938053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fbf4592ad762f5f38aba26ca4fd24a7fdbaad368
| 16,983
|
py
|
Python
|
feature_selection_plots.py
|
ashu-vyas-github/Flemish_Sign_Language_Openpose
|
6a7365b9d06a822ea6ece280031c193f582da4c3
|
[
"MIT"
] | null | null | null |
feature_selection_plots.py
|
ashu-vyas-github/Flemish_Sign_Language_Openpose
|
6a7365b9d06a822ea6ece280031c193f582da4c3
|
[
"MIT"
] | null | null | null |
feature_selection_plots.py
|
ashu-vyas-github/Flemish_Sign_Language_Openpose
|
6a7365b9d06a822ea6ece280031c193f582da4c3
|
[
"MIT"
] | null | null | null |
# import numpy as np
# import pandas as pd
# from datetime import datetime
# import matplotlib.pyplot as plt
# from os.path import join as pjoin
# import util.vis as V
# import util.helpers as H
# import data_analysis
# import csv
# import random
# import gc
# from glob import glob
# import sklearn as sk
# from sklearn import preprocessing
# import feature_engineering.feature_preprocessing as feat_prepro
# import feature_engineering.feature_extractors_4D_array as feat_extract
# from feature_engineering.data_augmentation import SLRImbAugmentation
# import feature_engineering.data_augmentation as data_augm
# from sklearn.preprocessing import StandardScaler
# from sklearn.decomposition import PCA
# from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GroupKFold
# from util.stratified_group_cv import StratifiedGroupKFold
# from sklearn.feature_selection import SelectKBest
# from sklearn.pipeline import Pipeline
# from sklearn.model_selection import GridSearchCV
# from util.results_plots_evaluation import map3_scorer
# import util.results_plots_evaluation as results
# from sklearn.metrics import accuracy_score
# import util.helpers as kaggle_submission
# from sklearn.linear_model import LogisticRegression
# np.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
# rng = np.random.RandomState(42)
# startTime= datetime.now()
# n_splits = 5
# remove_keypoints = True
# save_plot = False
# unwanted_keypoints=[10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94]
# face_body_hand = [True, True, True, False, False, False, False, False, False, False]
# physics1 = [False, False, False, True, False, True, False, False, False, False]
# physics2 = [False, False, False, True, False, True, True, False, False, False]
# physics3 = [False, False, False, True, False, True, False, False, True, False]
# physics4 = [False, False, False, True, False, True, False, False, False, True]
# physics5 = [False, False, False, True, False, False, False, True, False, False]
# physics6 = [False, False, False, True, False, False, True, True, False, False]
# physics7 = [False, False, False, True, False, False, False, True, True, False]
# physics8 = [False, False, False, True, False, False, False, True, False, True]
# trajectory = [False, False, False, False, True, False, False, False, False, False]
# all_feat = [True, True, True, True, True, True, True, True, True, True]
# # PATHS
# DATA_DIR = '../data'
# POSE_DIR = '../data/pose'
# TRAIN_DIR = POSE_DIR + "/train"
# TEST_DIR = POSE_DIR + "/test"
# # Read CSV file of labels
# full_dataframe = pd.read_csv(pjoin(DATA_DIR, "labels.csv"))
# full_dataframe['Data'] = full_dataframe['File'].apply(lambda title: np.load(pjoin(TRAIN_DIR, title + ".npy")))
# print("\n~~~~~##### Start #####~~~~~\n")
# num_frames_list = [2, 4, 8, 15, 24, 48, 60]
# gscv_best_score_list = []
# validtt_map3_trn = []
# validtt_map3_vld = []
# for interpolated_total_frames in num_frames_list:
# print("\n\n\nRunning interpolated_total_frames:", interpolated_total_frames)
# # 4D data as (n_samples, n_frames, n_keypoints, n_coords)
# samples_centered_4D_array = feat_prepro.interpolate_allsamples(full_dataframe.Data, interpolated_total_frames=interpolated_total_frames, x_resolution=1.0, y_resolution=1.0)
# print("Interplated data shape",samples_centered_4D_array.shape)
# # Train and test split
# X_traintt, X_validtt, y_traintt, y_validtt, group_traintt, group_validtt = train_test_split(samples_centered_4D_array, np.asarray(full_dataframe.Label), np.asarray(full_dataframe.Person), test_size=0.25, random_state=42, shuffle=True, stratify=None)#=np.asarray(full_dataframe.Label))
# face_flag=True
# body_flag=True
# hand_flag=True
# physics_flag=True
# trajectory_flag=True
# linear_flag=True
# std_flag=True
# angular_flag=False
# velocity_flag=False
# acceleration_flag=False
# X_train = feat_extract.main_feature_extractor(array_4D_data=X_traintt, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
# X_valid = feat_extract.main_feature_extractor(array_4D_data=X_validtt, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
# ### Standard Scaler
# stdscl = StandardScaler()
# ### Cross validator
# # cvld = StratifiedShuffleSplit(n_splits=n_splits, test_size=0.2, train_size=None, random_state=42)
# # cvld = StratifiedGroupKFold(n_splits=n_splits, shuffle=True, random_state=42)
# cvld = GroupKFold(n_splits=n_splits)
# ### Estimator
# estimator = LogisticRegression(C=1.0, tol=1e-4, class_weight=None, solver='lbfgs', max_iter=5000, multi_class='ovr', penalty='l2', dual=False, fit_intercept=True, intercept_scaling=1, random_state=42, verbose=0, warm_start=False, n_jobs=-1, l1_ratio=None)
# print("\nTraining the model", str(estimator))
# pipe = Pipeline([('scale', stdscl), ('clf', estimator)])
# ### Grid Search CV
# param_grid = dict(clf__C=np.logspace(-3, 1, 5))
# print("Running GSCV.....")
# grid = GridSearchCV(pipe, param_grid=param_grid, cv=cvld, n_jobs=-1, verbose=0, scoring=map3_scorer)
# grid.fit(X_train, y_traintt, group_traintt)
# print(grid.best_params_)
# print(grid.best_score_)
# gscv_best_score_list.append(grid.best_score_)
# map3_trn, map3_vld = results.predict_print_results(grid, X_train, X_valid, y_traintt, y_validtt)
# validtt_map3_trn.append(map3_trn)
# validtt_map3_vld.append(map3_vld)
# plt.rcParams.update({'font.size':6})
# bar_width = 0.25
# dpi_setting = 1200
# labels = num_frames_list
# fname = 'num_frames_logreg_mean_raw_groupK'
# plt.figure(num=None, figsize=None, dpi=dpi_setting, facecolor='w', edgecolor='w')
# plt.title("Optimum #Frames LogReg GroupK")
# plt.xlabel('Interpolated #Frames')
# plt.ylabel('map@3 score')
# plt.ylim(0.0,1.1)
# plt.bar(x=np.arange(len(gscv_best_score_list))-bar_width, height=gscv_best_score_list, width=bar_width, label='Best GSCV Score', align='center')
# plt.bar(x=np.arange(len(gscv_best_score_list)), height=validtt_map3_trn, width=bar_width, label='Training score', align='center')
# plt.bar(x=np.arange(len(gscv_best_score_list))+bar_width, height=validtt_map3_vld, width=bar_width, label='Test Score', align='center')
# plt.xticks(ticks=np.arange(len(gscv_best_score_list)), labels=labels, rotation=0)
# plt.grid(b=True, which='major', axis='both', linestyle=':', linewidth=0.5, alpha=1)
# plt.legend()
# plt.savefig("{txt1}.png".format(txt1=fname), dpi=dpi_setting, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format='png', transparent=False, bbox_inches='tight', pad_inches=0.1, metadata=None)
# plt.show()
# print("\n~~~~~##### Done #####~~~~~\n")
# timeElapsed = datetime.now() - startTime
# print('Time elpased (hh:mm:ss.ms) {}'.format(timeElapsed))
###############################################################################
###############################################################################
########### BELOW IS THE CODE FOR FEATURE SELECTION
###############################################################################
###############################################################################
# ---------------------------------------------------------------------------
# Feature-set selection experiment for pose-sequence classification.
# For each boolean feature mask in `features_selection_array` it extracts a
# feature matrix, grid-searches an SVC over C with grouped CV, and finally
# bar-plots the best CV score next to the training score per feature set.
# ---------------------------------------------------------------------------
# NOTE(review): several imports below (V, H, data_analysis, csv, random, gc,
# glob, sk, preprocessing, SLRImbAugmentation, data_augm, PCA, SelectKBest,
# train_test_split, StratifiedShuffleSplit, StratifiedGroupKFold,
# accuracy_score, kaggle_submission, LogisticRegression) are unused in the
# active code path; they support the commented-out variants kept below.
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from os.path import join as pjoin
import util.vis as V
import util.helpers as H
import data_analysis
import csv
import random
import gc
from glob import glob
import sklearn as sk
from sklearn import preprocessing
import feature_engineering.feature_preprocessing as feat_prepro
import feature_engineering.feature_extractors_4D_array as feat_extract
from feature_engineering.data_augmentation import SLRImbAugmentation
import feature_engineering.data_augmentation as data_augm
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GroupKFold
from util.stratified_group_cv import StratifiedGroupKFold
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from util.results_plots_evaluation import map3_scorer
import util.results_plots_evaluation as results
from sklearn.metrics import accuracy_score
import util.helpers as kaggle_submission
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
# Fail fast: turn every numpy floating-point warning into an exception so bad
# feature values surface immediately instead of propagating as NaN/inf.
np.seterr(all='raise', divide='raise', over='raise', under='raise', invalid='raise')
rng = np.random.RandomState(42)  # NOTE(review): unused below; randomness is seeded per-estimator
startTime= datetime.now()
# --- Experiment configuration ---
interpolated_total_frames = 15  # every sample is resampled to this many frames
n_splits = 7                    # folds for the grouped cross-validation
# Per-iteration feature toggles; overwritten inside the loop from one row of
# features_selection_array before each extraction run.
face_flag = None
body_flag = None
hand_flag = None
physics_flag = None
trajectory_flag = None
linear_flag = None
std_flag = None
angular_flag = None
velocity_flag = None
acceleration_flag = None
remove_keypoints = False
save_plot = False
# Keypoint indices to drop when remove_keypoints is True.
unwanted_keypoints=[10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94]
# Each mask row toggles, in order: face, body, hand, physics, trajectory,
# linear, std, angular, velocity, acceleration.
face_body_hand = [True, True, True, False, False, False, False, False, False, False]
physics1 = [False, False, False, True, False, True, False, False, False, False]
physics2 = [False, False, False, True, False, True, True, False, False, False]
physics3 = [False, False, False, True, False, True, False, False, True, False]
physics4 = [False, False, False, True, False, True, False, False, False, True]
physics5 = [False, False, False, True, False, False, False, True, False, False]
physics6 = [False, False, False, True, False, False, True, True, False, False]
physics7 = [False, False, False, True, False, False, False, True, True, False]
physics8 = [False, False, False, True, False, False, False, True, False, True]
trajectory = [False, False, False, False, True, False, False, False, False, False]
all_feat = [True, True, True, True, True, True, True, True, True, True]
# Shape: (n_feature_sets, 10); one grid search is run per row.
features_selection_array = np.array([face_body_hand, physics1, physics2, physics3, physics4, physics5, physics6, physics7, physics8, trajectory, all_feat])
print(features_selection_array.shape)
# PATHS
DATA_DIR = '../data'
POSE_DIR = '../data/pose'
TRAIN_DIR = POSE_DIR + "/train"
TEST_DIR = POSE_DIR + "/test"
# Read CSV file of labels and attach the raw pose array for each sample.
full_dataframe = pd.read_csv(pjoin(DATA_DIR, "labels.csv"))
full_dataframe['Data'] = full_dataframe['File'].apply(lambda title: np.load(pjoin(TRAIN_DIR, title + ".npy")))
# Resampling and augmentation step
print("\n~~~~~##### Start #####~~~~~\n")
# 4D data as (n_samples, n_frames, n_keypoints, n_coords)
samples_centered_4D_array = feat_prepro.interpolate_allsamples(full_dataframe.Data, interpolated_total_frames=interpolated_total_frames, x_resolution=1.0, y_resolution=1.0)
print("Interpolated data shape",samples_centered_4D_array.shape)
# Train and test split
# X_traintt, X_validtt, y_traintt, y_validtt, group_traintt, group_validtt = train_test_split(samples_centered_4D_array, np.asarray(full_dataframe.Label), np.asarray(full_dataframe.Person), test_size=0.25, random_state=42, shuffle=True, stratify=None)#=np.asarray(full_dataframe.Label))
# print("Training shape 4D split",X_traintt.shape)
# print("Validation shape 4D split",X_validtt.shape)
# No held-out split here: all samples are used and GroupKFold (grouped by
# Person) provides the validation signal.
y_train = np.asarray(full_dataframe.Label)
group_train = np.asarray(full_dataframe.Person)
#### Augmentation (disabled in this run)
# slr_obj = SLRImbAugmentation()
# X_traintt, y_traintt, group_traintt = slr_obj.fit(X=X_traintt, y=y_traintt, groups=group_traintt, augmentation_factor=2)
# X_traintt, y_traintt, group_traintt = data_augm.resample_data(X=X_traintt, y=y_traintt, groups=group_traintt)
gscv_best_score_list = []  # best grouped-CV map@3 per feature set
validtt_map3_trn = []      # training-set map@3 per feature set
# validtt_map3_vld = []
for one_feature_set_idx in range(features_selection_array.shape[0]):
    print("\n\n\nRunning feature set:", one_feature_set_idx)
    # Unpack the ten boolean toggles for this feature set.
    face_flag, body_flag, hand_flag, physics_flag, trajectory_flag, linear_flag, std_flag, angular_flag, velocity_flag, acceleration_flag = features_selection_array[one_feature_set_idx, :].ravel()
    X_train = feat_extract.main_feature_extractor(array_4D_data=samples_centered_4D_array, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
    # X_valid = feat_extract.main_feature_extractor(array_4D_data=X_validtt, face=face_flag, body=body_flag, hands=hand_flag, physics=physics_flag, trajectory=trajectory_flag, linear_flag=linear_flag, angular_flag=angular_flag, std_flag=std_flag, velocity_flag=velocity_flag, acceleration_flag=acceleration_flag, remove_keypoints=remove_keypoints, unwanted_keypoints=unwanted_keypoints)
    ### Standard Scaler
    stdscl = StandardScaler()
    ### Feature selection: drop constant features only (threshold 0.0)
    selection = VarianceThreshold(threshold=0.0)
    ### Cross validator — grouped by Person so no signer leaks across folds
    # cvld = StratifiedShuffleSplit(n_splits=n_splits, test_size=0.2, train_size=None, random_state=42)
    # cvld = StratifiedGroupKFold(n_splits=n_splits, shuffle=True, random_state=42)
    cvld = GroupKFold(n_splits=n_splits)
    ### Estimator
    # estimator = LogisticRegression(C=1.0, tol=1e-4, class_weight=None, solver='lbfgs', max_iter=5000, multi_class='ovr', penalty='l2', dual=False, fit_intercept=True, intercept_scaling=1, random_state=42, verbose=0, warm_start=False, n_jobs=-1, l1_ratio=None)
    # probability=True is required by the map@3 scorer (needs predict_proba).
    estimator = SVC(C=1.0, kernel='rbf', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape='ovo', break_ties=False, random_state=42)
    print("\nTraining the model", str(estimator))
    # Variance filter -> scaling -> classifier, refit per CV candidate.
    pipe = Pipeline([('feat_select', selection), ('scale', stdscl), ('clf', estimator)])
    ### Grid Search CV over the SVC regularisation strength only
    param_grid = dict( clf__C=[0.01, 0.1, 0.5, 1.0, 4.0, 8.0, 10.0])
    print("Running GSCV.....")
    grid = GridSearchCV(pipe, param_grid=param_grid, cv=cvld, n_jobs=-1, verbose=0, scoring=map3_scorer)
    # grid.fit(X_train, y_traintt, group_traintt)
    grid.fit(X_train, y_train, groups=group_train)
    print(grid.best_params_)
    print(grid.best_score_)
    gscv_best_score_list.append(grid.best_score_)
    # NOTE(review): train data is passed as both train and "validation" here,
    # so map3_vld duplicates the training score in this configuration.
    map3_trn, map3_vld = results.predict_print_results(grid, X_train, X_train, y_train, y_train)
    validtt_map3_trn.append(map3_trn)
    # validtt_map3_vld.append(map3_vld)
# --- Plot per-feature-set scores as grouped bars ---
plt.rcParams.update({'font.size':6})
bar_width = 0.25
dpi_setting = 1200
labels = [int(x+1) for x in range(features_selection_array.shape[0])]  # 1-based set ids
fname = 'feat_sets_confidence_svcRBFovo_mean_raw_groupK'
plt.figure(num=None, figsize=None, dpi=dpi_setting, facecolor='w', edgecolor='w')
plt.title("Feature set comparison SVC-RBF GroupK")
plt.xlabel('Features set')
plt.ylabel('map@3 score')
plt.ylim(0.0,1.1)
plt.bar(x=np.arange(len(gscv_best_score_list))-bar_width/2.0, height=gscv_best_score_list, width=bar_width, label='Best GSCV Score', align='center')
plt.bar(x=np.arange(len(gscv_best_score_list))+bar_width/2.0, height=validtt_map3_trn, width=bar_width, label='Training score', align='center')
# plt.bar(x=np.arange(len(gscv_best_score_list))+bar_width, height=validtt_map3_vld, width=bar_width, label='Test Score', align='center')
plt.xticks(ticks=np.arange(len(gscv_best_score_list)), labels=labels, rotation=0)
# NOTE(review): grid(b=...) and savefig(papertype=...) are deprecated/removed
# in recent matplotlib — confirm the pinned matplotlib version before upgrading.
plt.grid(b=True, which='major', axis='both', linestyle=':', linewidth=0.5, alpha=1)
plt.legend()
plt.savefig("{txt1}.png".format(txt1=fname), dpi=dpi_setting, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format='png', transparent=False, bbox_inches='tight', pad_inches=0.1, metadata=None)
plt.show()
print("\n~~~~~##### Done #####~~~~~\n")
timeElapsed = datetime.now() - startTime
print('Time elpased (hh:mm:ss.ms) {}'.format(timeElapsed))
# clf__C=[0.2, 0.4, 0.6, 0.8, 1.0, 2.0, 3.0, 5.0, 10.0] for Logreg OVR
# clf__C=[0.01, 0.1, 0.5, 1.0, 4.0, 8.0, 10.0] for SVC RBF OVO
#
| 44.574803
| 400
| 0.733616
| 2,491
| 16,983
| 4.773986
| 0.156965
| 0.077363
| 0.063068
| 0.041541
| 0.859317
| 0.848133
| 0.839556
| 0.83367
| 0.825933
| 0.819795
| 0
| 0.036738
| 0.118471
| 16,983
| 381
| 401
| 44.574803
| 0.757598
| 0.552553
| 0
| 0
| 0
| 0
| 0.070185
| 0.006509
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.278261
| 0
| 0.278261
| 0.095652
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
221dc46ad3caf2ce39c428ac80189e4fdfa7d7fd
| 106
|
py
|
Python
|
mime/agent/__init__.py
|
rjgpinel/mime-release
|
26a850c4ba5b702b86d068995614163338fb01df
|
[
"MIT"
] | 13
|
2020-06-24T10:52:28.000Z
|
2021-07-23T03:05:27.000Z
|
mime/agent/__init__.py
|
rjgpinel/mime-release
|
26a850c4ba5b702b86d068995614163338fb01df
|
[
"MIT"
] | 1
|
2020-08-18T12:45:15.000Z
|
2020-08-18T12:45:15.000Z
|
mime/agent/__init__.py
|
rjgpinel/mime-release
|
26a850c4ba5b702b86d068995614163338fb01df
|
[
"MIT"
] | 3
|
2020-09-09T18:17:46.000Z
|
2021-09-06T09:43:45.000Z
|
from .script_agent import ScriptAgent
from .vr_agent import VRAgent
from .replay_agent import ReplayAgent
| 26.5
| 37
| 0.858491
| 15
| 106
| 5.866667
| 0.6
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 106
| 3
| 38
| 35.333333
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2250b5947e99dcbb1d5d50bd6dc4648af3d620c9
| 1,194
|
py
|
Python
|
bfrs/migrations/0005_auto_20170919_1340.py
|
xzzy/bfrs
|
07eeaffff207bf4fca1c95a5ba25c9118c9eab7a
|
[
"Apache-2.0"
] | null | null | null |
bfrs/migrations/0005_auto_20170919_1340.py
|
xzzy/bfrs
|
07eeaffff207bf4fca1c95a5ba25c9118c9eab7a
|
[
"Apache-2.0"
] | 3
|
2020-02-12T00:03:12.000Z
|
2021-12-13T19:45:47.000Z
|
bfrs/migrations/0005_auto_20170919_1340.py
|
xzzy/bfrs
|
07eeaffff207bf4fca1c95a5ba25c9118c9eab7a
|
[
"Apache-2.0"
] | 5
|
2018-02-16T02:05:40.000Z
|
2022-01-18T03:35:41.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-19 05:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.10.7).

    Redefines the aerial-dispatch fields on both ``bushfire`` and its
    ``bushfiresnapshot`` copy: ``dispatch_aerial`` as a NullBooleanField and
    ``dispatch_aerial_date`` as an optional DateTimeField, with updated
    verbose names.
    """

    dependencies = [
        ('bfrs', '0004_auto_20170911_1051'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bushfire',
            name='dispatch_aerial',
            field=models.NullBooleanField(verbose_name=b'Aerial support requested'),
        ),
        migrations.AlterField(
            model_name='bushfire',
            name='dispatch_aerial_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name=b'Aerial support request date'),
        ),
        migrations.AlterField(
            model_name='bushfiresnapshot',
            name='dispatch_aerial',
            field=models.NullBooleanField(verbose_name=b'Aerial support requested'),
        ),
        migrations.AlterField(
            model_name='bushfiresnapshot',
            name='dispatch_aerial_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name=b'Aerial support request date'),
        ),
    ]
| 33.166667
| 108
| 0.612228
| 117
| 1,194
| 6.059829
| 0.435897
| 0.112835
| 0.141044
| 0.163611
| 0.733427
| 0.733427
| 0.733427
| 0.733427
| 0.583921
| 0.583921
| 0
| 0.038642
| 0.284757
| 1,194
| 35
| 109
| 34.114286
| 0.791569
| 0.056951
| 0
| 0.714286
| 1
| 0
| 0.227022
| 0.02114
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22554599a50b8368115407c8f20f64387b4db55f
| 8,314
|
py
|
Python
|
tests/test_fleur_vis.py
|
soumyajyotih/masci-tools
|
e4d9ea2fbf6e16378d0cbfb8828a11bdb09c2139
|
[
"MIT"
] | null | null | null |
tests/test_fleur_vis.py
|
soumyajyotih/masci-tools
|
e4d9ea2fbf6e16378d0cbfb8828a11bdb09c2139
|
[
"MIT"
] | null | null | null |
tests/test_fleur_vis.py
|
soumyajyotih/masci-tools
|
e4d9ea2fbf6e16378d0cbfb8828a11bdb09c2139
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Test of the DOS/bandstructure visualizations
"""
import os
import pytest
from matplotlib.pyplot import gcf
# Directory of this test module; HDF5 fixtures are resolved relative to it.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
HDFTEST_DIR = os.path.join(CURRENT_DIR, 'files/hdf5_reader')
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='bands_defaults.png')
def test_plot_bands_defaults_mpl():
    """Default band-structure plot must match the stored baseline image."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurBands
    from masci_tools.vis.fleur import plot_fleur_bands
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_bands.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurBands)
    gcf().clear()
    plot_fleur_bands(data, attributes, show=False, markersize=30)
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='bands_weighted_non_spinpol.png')
def test_plot_bands_weighted_non_spinpol_mpl():
    """Non-spin-polarised bands weighted by 'MT:1d' must match the baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurBands
    from masci_tools.vis.fleur import plot_fleur_bands
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_bands.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurBands)
    gcf().clear()
    plot_fleur_bands(data, attributes, show=False, weight='MT:1d')
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='bands_defaults_spinpol.png')
def test_plot_bands_spinpol_defaults_mpl():
    """Default spin-polarised band plot must match the stored baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurBands
    from masci_tools.vis.fleur import plot_fleur_bands
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_spinpol_bands.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurBands)
    gcf().clear()
    plot_fleur_bands(data, attributes, show=False, markersize=30)
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='bands_weighted_spinpol.png')
def test_plot_bands_weighted_spinpol_mpl():
    """Spin-polarised bands weighted by 'MT:1d' must match the baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurBands
    from masci_tools.vis.fleur import plot_fleur_bands
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_spinpol_bands.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurBands)
    gcf().clear()
    plot_fleur_bands(data, attributes, show=False, weight='MT:1d')
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='bands_spinpol_hide.png')
def test_plot_bands_spinpol_no_spinpol_mpl():
    """spinpol=False on spin-polarised data collapses the spin channels; compare to baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurBands
    from masci_tools.vis.fleur import plot_fleur_bands
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_spinpol_bands.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurBands)
    gcf().clear()
    plot_fleur_bands(data, attributes, show=False, markersize=30, spinpol=False)
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='bands_only_spin.png')
def test_plot_bands_spinpol_only_spin_mpl():
    """only_spin='up' restricts the plot to one spin channel; compare to baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurBands
    from masci_tools.vis.fleur import plot_fleur_bands
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_spinpol_bands.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurBands)
    gcf().clear()
    plot_fleur_bands(data, attributes, show=False, markersize=30, only_spin='up')
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='dos_defaults.png')
def test_plot_dos_defaults_mpl():
    """Default DOS plot must match the stored baseline image."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurDOS
    from masci_tools.vis.fleur import plot_fleur_dos
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_dos.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurDOS)
    gcf().clear()
    plot_fleur_dos(data, attributes, show=False)
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='dos_param_by_label.png')
def test_plot_dos_param_change_by_label_mpl():
    """Per-label style overrides (color/linewidth dicts) must match the baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurDOS
    from masci_tools.vis.fleur import plot_fleur_dos
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_dos.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurDOS)
    gcf().clear()
    plot_fleur_dos(data, attributes, show=False, color={'MT:1_up': 'red'}, linewidth={'Total_up': 6})
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='dos_param_by_label_with_general_params.png')
def test_plot_dos_param_change_by_label_general_dicts_mpl():
    """Per-label overrides combined with general params (limits, lines) must match the baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurDOS
    from masci_tools.vis.fleur import plot_fleur_dos
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_dos.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurDOS)
    gcf().clear()
    plot_fleur_dos(data,
                   attributes,
                   show=False,
                   color={'MT:1_up': 'red'},
                   linewidth={'Total_up': 6},
                   limits={'energy': (-5, 5)},
                   lines={'vertical': [-1, 0, 1]})
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='spinpol_dos_defaults.png')
def test_plot_spinpol_dos_defaults_mpl():
    """Default spin-polarised DOS plot must match the stored baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurDOS
    from masci_tools.vis.fleur import plot_fleur_dos
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_spinpol_dos.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurDOS)
    gcf().clear()
    plot_fleur_dos(data, attributes, show=False)
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='dos_selection.png')
def test_plot_dos_selection_mpl():
    """Component selection flags (show_total/interstitial/atoms/lresolved/plot_keys) must match the baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurDOS
    from masci_tools.vis.fleur import plot_fleur_dos
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_dos.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurDOS)
    gcf().clear()
    plot_fleur_dos(data,
                   attributes,
                   show=False,
                   show_total=False,
                   show_interstitial=False,
                   show_atoms=1,
                   show_lresolved=2,
                   plot_keys='MT:1p')
    return gcf()
@pytest.mark.mpl_image_compare(baseline_dir='files/fleur_vis/', filename='bands_character.png')
def test_plot_bands_characterize_mpl():
    """Orbital-character band plot (s/p/d/f colored) must match the baseline."""
    from masci_tools.io.parsers.hdf5 import HDF5Reader
    from masci_tools.io.parsers.hdf5.recipes import FleurBands
    from masci_tools.vis.fleur import plot_fleur_bands_characterize
    TEST_BANDDOS_FILE = os.path.join(HDFTEST_DIR, 'banddos_spinpol_bands.hdf')
    with HDF5Reader(TEST_BANDDOS_FILE) as h5reader:
        data, attributes = h5reader.read(recipe=FleurBands)
    gcf().clear()
    plot_fleur_bands_characterize(data,
                                  attributes, ['MT:1s', 'MT:1p', 'MT:1d', 'MT:1f'],
                                  ['darkblue', 'darkred', 'darkgreen', 'darkorange'],
                                  show=False,
                                  markersize=30,
                                  only_spin='up')
    return gcf()
| 33.934694
| 118
| 0.722637
| 1,127
| 8,314
| 5.045253
| 0.09583
| 0.056982
| 0.088639
| 0.067534
| 0.912768
| 0.903095
| 0.87988
| 0.867921
| 0.867921
| 0.846817
| 0
| 0.014871
| 0.175006
| 8,314
| 244
| 119
| 34.07377
| 0.814113
| 0.008059
| 0
| 0.690323
| 0
| 0
| 0.103775
| 0.041267
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077419
| false
| 0
| 0.251613
| 0
| 0.406452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22642b807bee2172ec8fe3b8d3664d96c5eba11f
| 5,409
|
py
|
Python
|
tests/marketmaking/orderchain/test_preventpostonlycrossingbookelement.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
tests/marketmaking/orderchain/test_preventpostonlycrossingbookelement.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
tests/marketmaking/orderchain/test_preventpostonlycrossingbookelement.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
import argparse
from ...context import mango
from ...fakes import fake_context, fake_model_state, fake_loaded_market, fake_order
from decimal import Decimal
from mango.marketmaking.orderchain.preventpostonlycrossingbookelement import (
PreventPostOnlyCrossingBookElement,
)
# Shared fixtures: a book with a 90/110 bid/ask spread used by most tests.
# The top bid is the highest price someone is willing to pay to BUY
top_bid: mango.Order = fake_order(
    price=Decimal(90), side=mango.Side.BUY, order_type=mango.OrderType.POST_ONLY
)
# The top ask is the lowest price someone is willing to accept to SELL
top_ask: mango.Order = fake_order(
    price=Decimal(110), side=mango.Side.SELL, order_type=mango.OrderType.POST_ONLY
)
orderbook: mango.OrderBook = mango.OrderBook(
    "TEST", mango.NullLotSizeConverter(), [top_bid], [top_ask]
)
# Default model state; tests that need a different book build a local one.
model_state = fake_model_state(market=fake_loaded_market(), orderbook=orderbook)
def test_from_args() -> None:
    """Building the element from empty CLI arguments should succeed."""
    args: argparse.Namespace = argparse.Namespace()
    element = PreventPostOnlyCrossingBookElement.from_command_line_parameters(args)
    assert element is not None
def test_not_crossing_results_in_no_change() -> None:
    """An order priced inside the 90/110 spread passes through untouched."""
    context = fake_context()
    inside_order: mango.Order = fake_order(
        price=Decimal(100), order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    processed = element.process(context, model_state, [inside_order])
    assert processed == [inside_order]
def test_bid_too_high_results_in_new_bid() -> None:
    """A POST_ONLY bid above the top ask (110) is re-priced just below it."""
    context = fake_context()
    crossing_bid: mango.Order = fake_order(
        price=Decimal(120), side=mango.Side.BUY, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    processed = element.process(context, model_state, [crossing_bid])
    assert processed[0].price == 109
def test_bid_too_low_results_in_no_change() -> None:
    """A bid below the top bid does not cross the book and is left alone."""
    context = fake_context()
    low_bid: mango.Order = fake_order(
        price=Decimal(80), side=mango.Side.BUY, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    processed = element.process(context, model_state, [low_bid])
    assert processed == [low_bid]
def test_ask_too_low_results_in_new_ask() -> None:
    """A POST_ONLY ask below the top bid (90) is re-priced just above it."""
    context = fake_context()
    crossing_ask: mango.Order = fake_order(
        price=Decimal(80), side=mango.Side.SELL, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    processed = element.process(context, model_state, [crossing_ask])
    assert processed[0].price == 91
def test_ask_too_high_results_in_no_change() -> None:
    """An ask above the top ask does not cross the book and is left alone."""
    context = fake_context()
    high_ask: mango.Order = fake_order(
        price=Decimal(120), side=mango.Side.SELL, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    processed = element.process(context, model_state, [high_ask])
    assert processed == [high_ask]
def test_bid_too_high_no_bid_results_in_new_bid() -> None:
    """A crossing bid is still re-priced when the book holds asks but no bids."""
    context = fake_context()
    crossing_bid: mango.Order = fake_order(
        price=Decimal(120), side=mango.Side.BUY, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    asks_only_book: mango.OrderBook = mango.OrderBook(
        "TEST", mango.NullLotSizeConverter(), [], [top_ask]
    )
    local_state = fake_model_state(market=fake_loaded_market(), orderbook=asks_only_book)
    processed = element.process(context, local_state, [crossing_bid])
    assert processed[0].price == 109
def test_ask_too_low_no_ask_results_in_new_ask() -> None:
    """A crossing ask is still re-priced when the book holds bids but no asks."""
    context = fake_context()
    crossing_ask: mango.Order = fake_order(
        price=Decimal(80), side=mango.Side.SELL, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    bids_only_book: mango.OrderBook = mango.OrderBook(
        "TEST", mango.NullLotSizeConverter(), [top_bid], []
    )
    local_state = fake_model_state(market=fake_loaded_market(), orderbook=bids_only_book)
    processed = element.process(context, local_state, [crossing_ask])
    assert processed[0].price == 91
def test_ask_no_orderbook_results_in_no_change() -> None:
    """With an empty book there is nothing to cross, so the ask is unchanged."""
    context = fake_context()
    lone_ask: mango.Order = fake_order(
        price=Decimal(120), side=mango.Side.SELL, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    empty_book: mango.OrderBook = mango.OrderBook(
        "TEST", mango.NullLotSizeConverter(), [], []
    )
    local_state = fake_model_state(market=fake_loaded_market(), orderbook=empty_book)
    processed = element.process(context, local_state, [lone_ask])
    assert processed == [lone_ask]
def test_bid_no_orderbook_results_in_no_change() -> None:
    """With an empty book there is nothing to cross, so the bid is unchanged."""
    context = fake_context()
    lone_bid: mango.Order = fake_order(
        price=Decimal(80), side=mango.Side.BUY, order_type=mango.OrderType.POST_ONLY
    )
    element = PreventPostOnlyCrossingBookElement()
    empty_book: mango.OrderBook = mango.OrderBook(
        "TEST", mango.NullLotSizeConverter(), [], []
    )
    local_state = fake_model_state(market=fake_loaded_market(), orderbook=empty_book)
    processed = element.process(context, local_state, [lone_bid])
    assert processed == [lone_bid]
| 33.80625
| 86
| 0.738399
| 625
| 5,409
| 6.136
| 0.1072
| 0.052151
| 0.040156
| 0.054498
| 0.863625
| 0.858931
| 0.842764
| 0.828162
| 0.828162
| 0.828162
| 0
| 0.009229
| 0.158625
| 5,409
| 159
| 87
| 34.018868
| 0.833443
| 0.024219
| 0
| 0.585586
| 0
| 0
| 0.003791
| 0
| 0
| 0
| 0
| 0
| 0.09009
| 1
| 0.09009
| false
| 0
| 0.045045
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
97f4076d5790549de42d025a403a5ed8662afdf3
| 44
|
py
|
Python
|
mpxapi/publish/__init__.py
|
colde/mpxapi
|
cda7d06c5c709ba7d652c59156ab7bb213bc2de1
|
[
"MIT"
] | 3
|
2018-01-23T10:27:41.000Z
|
2019-03-29T21:12:25.000Z
|
mpxapi/publish/__init__.py
|
colde/mpxapi
|
cda7d06c5c709ba7d652c59156ab7bb213bc2de1
|
[
"MIT"
] | null | null | null |
mpxapi/publish/__init__.py
|
colde/mpxapi
|
cda7d06c5c709ba7d652c59156ab7bb213bc2de1
|
[
"MIT"
] | 3
|
2018-06-27T14:05:49.000Z
|
2019-09-16T11:28:37.000Z
|
from .publish_profile import PublishProfile
| 22
| 43
| 0.886364
| 5
| 44
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f7291c20f33e76297855d2358c581069ded89d2
| 196
|
py
|
Python
|
src/model/linearDistance.py
|
Bocampagni/Shipping-api
|
4cdf074467e4478885fe55d7c82a16e1a577b045
|
[
"MIT"
] | null | null | null |
src/model/linearDistance.py
|
Bocampagni/Shipping-api
|
4cdf074467e4478885fe55d7c82a16e1a577b045
|
[
"MIT"
] | null | null | null |
src/model/linearDistance.py
|
Bocampagni/Shipping-api
|
4cdf074467e4478885fe55d7c82a16e1a577b045
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
class linearDistance(BaseModel):
    """Request model holding two (longitude, latitude) coordinate pairs.

    Presumably the endpoints between which a linear distance is computed —
    TODO(review): confirm against the route handler that consumes this model.
    """
    first_lon_coordinate: float   # longitude of the first point
    first_lat_coordinate: float   # latitude of the first point
    second_lon_coordinate: float  # longitude of the second point
    second_lat_coordinate: float  # latitude of the second point
| 21.777778
| 32
| 0.80102
| 23
| 196
| 6.478261
| 0.521739
| 0.402685
| 0.241611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 196
| 8
| 33
| 24.5
| 0.908537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
4537b1165c263427beae1e85ba912192f4fecaef
| 44
|
py
|
Python
|
jupyterprobe/__init__.py
|
vermashresth/jupyter-probe
|
cec35399dbe0d19d4264df02df305504bea0f695
|
[
"MIT"
] | 8
|
2020-12-15T14:19:29.000Z
|
2021-09-23T03:39:21.000Z
|
build/lib/jupyterprobe/__init__.py
|
vermashresth/jupyter-probe
|
cec35399dbe0d19d4264df02df305504bea0f695
|
[
"MIT"
] | null | null | null |
build/lib/jupyterprobe/__init__.py
|
vermashresth/jupyter-probe
|
cec35399dbe0d19d4264df02df305504bea0f695
|
[
"MIT"
] | null | null | null |
from jupyterprobe.jupyterprobe import Probe
| 22
| 43
| 0.886364
| 5
| 44
| 7.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
189026be69c9d26f147980238952d00a66d9e3d4
| 59
|
py
|
Python
|
lcu_driver/__init__.py
|
TheodorStraube/lcu-driver
|
892e5695807a0ad27afa411b103a99fd64397f87
|
[
"MIT"
] | 57
|
2019-06-07T09:35:00.000Z
|
2022-03-09T06:31:47.000Z
|
lcu_driver/__init__.py
|
TheodorStraube/lcu-driver
|
892e5695807a0ad27afa411b103a99fd64397f87
|
[
"MIT"
] | 11
|
2020-10-31T02:42:59.000Z
|
2022-03-18T02:46:33.000Z
|
lcu_driver/__init__.py
|
TheodorStraube/lcu-driver
|
892e5695807a0ad27afa411b103a99fd64397f87
|
[
"MIT"
] | 11
|
2021-01-07T19:09:09.000Z
|
2022-03-20T06:54:06.000Z
|
from .connector import Connector, MultipleClientConnector
| 19.666667
| 57
| 0.864407
| 5
| 59
| 10.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 59
| 2
| 58
| 29.5
| 0.962264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.