**Schema**

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
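With the schema laid out, rows like the ones below can be loaded and filtered programmatically. A minimal sketch, assuming the table is stored in a local Parquet file (the filename `code_rows.parquet` is hypothetical; any columnar store of this table works the same way):

```python
import pandas as pd

# Hypothetical local file holding rows with the schema above.
df = pd.read_parquet("code_rows.parquet")

# Example filter: keep Python files that parse cleanly
# (qsc_codepython_cate_ast_quality_signal == 1), are not dominated by
# duplicated 10-grams, and have no extremely long lines.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
    & (df["max_line_length"] < 200)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]])
```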
**Row 1: `tests/test_caltrack_hourly.py` (Morilor/eemeter)**

| field | value |
|---|---|
| hexsha | `3d944c526977c961500124b5b555f2a9016560a1` |
| size | 14,812 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `tests/test_caltrack_hourly.py` |
| max_stars_repo_name | Morilor/eemeter |
| max_stars_repo_head_hexsha | `312525bb89119b877d0d905d45c167052b7275f5` |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 161 |
| max_stars_repo_stars_event_min_datetime | 2016-08-22T22:38:38.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-24T10:04:05.000Z |
| max_issues_repo_path | `tests/test_caltrack_hourly.py` |
| max_issues_repo_name | Morilor/eemeter |
| max_issues_repo_head_hexsha | `312525bb89119b877d0d905d45c167052b7275f5` |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 313 |
| max_issues_repo_issues_event_min_datetime | 2016-09-12T05:36:28.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-01-07T21:20:11.000Z |
| max_forks_repo_path | `tests/test_caltrack_hourly.py` |
| max_forks_repo_name | Morilor/eemeter |
| max_forks_repo_head_hexsha | `312525bb89119b877d0d905d45c167052b7275f5` |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 58 |
| max_forks_repo_forks_event_min_datetime | 2016-08-22T22:49:53.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-01-18T12:18:07.000Z |

content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import numpy as np
import pandas as pd
import pytest
from eemeter.caltrack.hourly import (
caltrack_hourly_fit_feature_processor,
caltrack_hourly_prediction_feature_processor,
fit_caltrack_hourly_model_segment,
fit_caltrack_hourly_model,
)
from eemeter.features import (
compute_time_features,
compute_temperature_features,
compute_usage_per_day_feature,
merge_features,
)
@pytest.fixture
def segmented_data():
index = pd.date_range(start="2017-01-01", periods=24, freq="H", tz="UTC")
time_features = compute_time_features(index)
segmented_data = pd.DataFrame(
{
"hour_of_week": time_features.hour_of_week,
"temperature_mean": np.linspace(0, 100, 24),
"meter_value": np.linspace(10, 70, 24),
"weight": np.ones((24,)),
},
index=index,
)
return segmented_data
@pytest.fixture
def occupancy_lookup():
index = pd.Categorical(range(168))
occupancy = pd.Series([i % 2 == 0 for i in range(168)], index=index)
return pd.DataFrame(
{"dec-jan-feb-weighted": occupancy, "jan-feb-mar-weighted": occupancy}
)
@pytest.fixture
def occupied_temperature_bins():
bins = pd.Series([True, True, True], index=[30, 60, 90])
return pd.DataFrame({"dec-jan-feb-weighted": bins, "jan-feb-mar-weighted": bins})
@pytest.fixture
def unoccupied_temperature_bins():
bins = pd.Series([False, True, True], index=[30, 60, 90])
return pd.DataFrame({"dec-jan-feb-weighted": bins, "jan-feb-mar-weighted": bins})
def test_caltrack_hourly_fit_feature_processor(
segmented_data,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
):
result = caltrack_hourly_fit_feature_processor(
"dec-jan-feb-weighted",
segmented_data,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
assert list(result.columns) == [
"meter_value",
"hour_of_week",
"bin_0_occupied",
"bin_1_occupied",
"bin_2_occupied",
"bin_3_occupied",
"bin_0_unoccupied",
"bin_1_unoccupied",
"bin_2_unoccupied",
"weight",
]
assert result.shape == (24, 10)
assert round(result.sum().sum(), 2) == 5916.0
def test_caltrack_hourly_prediction_feature_processor(
segmented_data,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
):
result = caltrack_hourly_prediction_feature_processor(
"dec-jan-feb-weighted",
segmented_data,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
assert list(result.columns) == [
"hour_of_week",
"bin_0_occupied",
"bin_1_occupied",
"bin_2_occupied",
"bin_3_occupied",
"bin_0_unoccupied",
"bin_1_unoccupied",
"bin_2_unoccupied",
"weight",
]
assert result.shape == (24, 9)
assert round(result.sum().sum(), 2) == 4956.0
@pytest.fixture
def segmented_design_matrices(
segmented_data,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
):
return {
"dec-jan-feb-weighted": caltrack_hourly_fit_feature_processor(
"dec-jan-feb-weighted",
segmented_data,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
}
def test_fit_caltrack_hourly_model_segment(segmented_design_matrices):
segment_name = "dec-jan-feb-weighted"
segment_data = segmented_design_matrices[segment_name]
segment_model = fit_caltrack_hourly_model_segment(segment_name, segment_data)
assert segment_model.formula == (
"meter_value ~ C(hour_of_week) - 1 + bin_0_occupied"
" + bin_1_occupied + bin_2_occupied + bin_3_occupied"
" + bin_0_unoccupied + bin_1_unoccupied + bin_2_unoccupied"
)
assert segment_model.segment_name == "dec-jan-feb-weighted"
assert len(segment_model.model_params.keys()) == 31
assert segment_model.model is not None
assert segment_model.warnings is not None
prediction = segment_model.predict(segment_data)
assert round(prediction.sum(), 2) == 960.0
@pytest.fixture
def temps():
index = pd.date_range(start="2017-01-01", periods=24, freq="H", tz="UTC")
temps = pd.Series(np.linspace(0, 100, 24), index=index)
return temps
def test_fit_caltrack_hourly_model(
segmented_design_matrices,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
temps,
):
segmented_model_results = fit_caltrack_hourly_model(
segmented_design_matrices,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
assert segmented_model_results.model.segment_models is not None
assert str(segmented_model_results).startswith("CalTRACKHourlyModelResults")
prediction = segmented_model_results.predict(temps.index, temps).result
def test_serialize_caltrack_hourly_model(
segmented_design_matrices,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
temps,
):
segmented_model = fit_caltrack_hourly_model(
segmented_design_matrices,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
assert json.dumps(segmented_model.json())
@pytest.fixture
def segmented_data_nans():
num_periods = 200
index = pd.date_range(start="2017-01-01", periods=num_periods, freq="H", tz="UTC")
time_features = compute_time_features(index)
segmented_data = pd.DataFrame(
{
"hour_of_week": time_features.hour_of_week,
"temperature_mean": np.linspace(0, 100, num_periods),
"meter_value": np.linspace(10, 70, num_periods),
"weight": np.ones((num_periods,)),
},
index=index,
)
return segmented_data
@pytest.fixture
def occupancy_lookup_nans():
index = pd.Categorical(range(168))
occupancy = pd.Series([i % 2 == 0 for i in range(168)], index=index)
occupancy_nans = pd.Series([np.nan for i in range(168)], index=index)
return pd.DataFrame(
{
"dec-jan-feb-weighted": occupancy,
"jan-feb-mar-weighted": occupancy,
"apr-may-jun-weighted": occupancy_nans,
}
)
@pytest.fixture
def temperature_bins_nans():
bins = pd.Series([True, True, True], index=[30, 60, 90])
bins_nans = pd.Series([False, False, False], index=[30, 60, 90])
return pd.DataFrame(
{
"dec-jan-feb-weighted": bins,
"jan-feb-mar-weighted": bins,
"apr-may-jun-weighted": bins_nans,
}
)
@pytest.fixture
def segmented_design_matrices_nans(
segmented_data_nans, occupancy_lookup_nans, temperature_bins_nans
):
return {
"dec-jan-feb-weighted": caltrack_hourly_fit_feature_processor(
"dec-jan-feb-weighted",
segmented_data_nans,
occupancy_lookup_nans,
temperature_bins_nans,
temperature_bins_nans,
),
"apr-may-jun-weighted": caltrack_hourly_fit_feature_processor(
"apr-may-jun-weighted",
segmented_data_nans,
occupancy_lookup_nans,
temperature_bins_nans,
temperature_bins_nans,
),
}
def test_fit_caltrack_hourly_model_nans_less_than_week_predict(
segmented_design_matrices_nans,
occupancy_lookup_nans,
temperature_bins_nans,
temps_extended,
temps,
):
segmented_model_results = fit_caltrack_hourly_model(
segmented_design_matrices_nans,
occupancy_lookup_nans,
temperature_bins_nans,
temperature_bins_nans,
)
assert segmented_model_results.model.segment_models is not None
assert segmented_model_results.model.model_lookup["jan"].model is not None
assert segmented_model_results.model.model_lookup["may"].model is not None
assert segmented_model_results.model.model_lookup["may"].warnings == []
prediction = segmented_model_results.predict(temps.index, temps).result
assert prediction.shape[0] == 24
assert prediction["predicted_usage"].sum().round() == 955.0
@pytest.fixture
def segmented_data_nans_less_than_week():
num_periods = 4
index = pd.date_range(start="2017-01-01", periods=num_periods, freq="H", tz="UTC")
time_features = compute_time_features(index)
segmented_data = pd.DataFrame(
{
"hour_of_week": time_features.hour_of_week,
"temperature_mean": np.linspace(0, 100, num_periods),
"meter_value": np.linspace(10, 70, num_periods),
"weight": np.ones((num_periods,)),
},
index=index,
)
return segmented_data
@pytest.fixture
def occupancy_lookup_nans_less_than_week():
index = pd.Categorical(range(168))
occupancy = pd.Series([i % 2 == 0 for i in range(168)], index=index)
occupancy_nans_less_than_week = pd.Series([np.nan for i in range(168)], index=index)
return pd.DataFrame(
{
"dec-jan-feb-weighted": occupancy,
"jan-feb-mar-weighted": occupancy,
"apr-may-jun-weighted": occupancy_nans_less_than_week,
}
)
@pytest.fixture
def temperature_bins_nans_less_than_week():
bins = pd.Series([True, True, True], index=[30, 60, 90])
bins_nans_less_than_week = pd.Series([False, False, False], index=[30, 60, 90])
return pd.DataFrame(
{
"dec-jan-feb-weighted": bins,
"jan-feb-mar-weighted": bins,
"apr-may-jun-weighted": bins_nans_less_than_week,
}
)
@pytest.fixture
def segmented_design_matrices_nans_less_than_week(
segmented_data_nans_less_than_week,
occupancy_lookup_nans_less_than_week,
temperature_bins_nans_less_than_week,
):
return {
"dec-jan-feb-weighted": caltrack_hourly_fit_feature_processor(
"dec-jan-feb-weighted",
segmented_data_nans_less_than_week,
occupancy_lookup_nans_less_than_week,
temperature_bins_nans_less_than_week,
temperature_bins_nans_less_than_week,
),
"apr-may-jun-weighted": caltrack_hourly_fit_feature_processor(
"apr-may-jun-weighted",
segmented_data_nans_less_than_week,
occupancy_lookup_nans_less_than_week,
temperature_bins_nans_less_than_week,
temperature_bins_nans_less_than_week,
),
}
@pytest.fixture
def temps_extended():
index = pd.date_range(start="2017-01-01", periods=168, freq="H", tz="UTC")
temps = pd.Series(1, index=index)
return temps
def test_fit_caltrack_hourly_model_nans_less_than_week_fit(
segmented_design_matrices_nans_less_than_week,
occupancy_lookup_nans_less_than_week,
temperature_bins_nans_less_than_week,
temps_extended,
):
segmented_model_results = fit_caltrack_hourly_model(
segmented_design_matrices_nans_less_than_week,
occupancy_lookup_nans_less_than_week,
temperature_bins_nans_less_than_week,
temperature_bins_nans_less_than_week,
)
assert segmented_model_results.model.segment_models is not None
prediction = segmented_model_results.predict(
temps_extended.index, temps_extended
).result
assert prediction.shape[0] == 168
assert prediction.dropna().shape[0] == 4
@pytest.fixture
def segmented_design_matrices_empty_models(
segmented_data,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
):
return {
"dec-jan-feb-weighted": caltrack_hourly_fit_feature_processor(
"dec-jan-feb-weighted",
segmented_data[:0],
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
}
def test_predict_caltrack_hourly_model_empty_models(
temps,
segmented_design_matrices_empty_models,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
):
segmented_model_results = fit_caltrack_hourly_model(
segmented_design_matrices_empty_models,
occupancy_lookup,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
assert segmented_model_results.model.segment_models is not None
prediction = segmented_model_results.predict(temps.index, temps).result
assert prediction.shape[0] == 24
assert prediction.dropna().shape[0] == 0
@pytest.fixture
def occupancy_lookup_zeroes():
index = pd.Categorical(range(168))
occupancy = pd.Series([False] * 168, index=index)
return pd.DataFrame(
{"dec-jan-feb-weighted": occupancy, "jan-feb-mar-weighted": occupancy}
)
@pytest.fixture
def segmented_design_matrices_single_mode(
segmented_data,
occupancy_lookup_zeroes,
occupied_temperature_bins,
unoccupied_temperature_bins,
):
return {
"dec-jan-feb-weighted": caltrack_hourly_fit_feature_processor(
"dec-jan-feb-weighted",
segmented_data,
occupancy_lookup_zeroes,
occupied_temperature_bins,
unoccupied_temperature_bins,
)
}
def test_fit_caltrack_hourly_model_segment_single_mode(
segmented_design_matrices_single_mode
):
segment_name = "dec-jan-feb-weighted"
segment_data = segmented_design_matrices_single_mode[segment_name]
segment_model = fit_caltrack_hourly_model_segment(segment_name, segment_data)
assert segment_model.formula == (
"meter_value ~ C(hour_of_week) - 1 + bin_0_occupied + bin_1_occupied"
" + bin_2_occupied + bin_3_occupied + bin_0_unoccupied + bin_1_unoccupied"
" + bin_2_unoccupied"
)
assert segment_model.segment_name == "dec-jan-feb-weighted"
assert len(segment_model.model_params.keys()) == 31
assert segment_model.model is not None
assert segment_model.warnings is not None
prediction = segment_model.predict(segment_data)
assert round(prediction.sum(), 2) == 960.0
```

avg_line_length: 31.052411; max_line_length: 88; alphanum_fraction: 0.690049

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

1,817 | 14,812 | 5.267474 | 0.10732 | 0.081496 | 0.035106 | 0.046808 | 0.883189 | 0.842754 | 0.805872 | 0.786125 | 0.777035 | 0.761362 | 0 | 0.021962 | 0.216109 | 14,812 | 476 | 89 | 31.117647 | 0.802343 | 0.041318 | 0 | 0.663317 | 0 | 0 | 0.114839 | 0.001836 | 0 | 0 | 0 | 0 | 0.082915 | 1 | 0.067839 | false | 0 | 0.015075 | 0.012563 | 0.128141 | 0

Raw qsc_* columns (41 values in schema order):

0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 0; hits: 6
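Most of the content-level signals in the trailer can be recomputed from `content` alone. A sketch of two plausible definitions, named after the columns but not guaranteed to match the pipeline's exact implementation:

```python
from collections import Counter

def frac_chars_whitespace(content: str) -> float:
    # Share of all characters that are whitespace.
    return sum(ch.isspace() for ch in content) / max(len(content), 1)

def frac_lines_dupe_lines(content: str) -> float:
    # Share of lines that occur more than once in the file.
    lines = content.splitlines()
    counts = Counter(lines)
    dupes = sum(n for n in counts.values() if n > 1)
    return dupes / max(len(lines), 1)
```

For the row above these would be compared against the recorded 0.216109 (whitespace fraction) and 0.663317 (duplicate-line fraction); small discrepancies are expected if the pipeline normalizes lines, for example by stripping indentation, before counting.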
**Row 2: `test/programytest/extensions/scheduler/test_scheduler.py` (cdoebler1/AIML2)**

| field | value |
|---|---|
| hexsha | `3daddab9450165a26aaa1dfd0d06224e21bc8e0a` |
| size | 16,481 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `test/programytest/extensions/scheduler/test_scheduler.py` |
| max_stars_repo_name | cdoebler1/AIML2 |
| max_stars_repo_head_hexsha | `ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 345 |
| max_stars_repo_stars_event_min_datetime | 2016-11-23T22:37:04.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-30T20:44:44.000Z |
| max_issues_repo_path | `test/programytest/extensions/scheduler/test_scheduler.py` |
| max_issues_repo_name | MikeyBeez/program-y |
| max_issues_repo_head_hexsha | `00d7a0c7d50062f18f0ab6f4a041068e119ef7f0` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 275 |
| max_issues_repo_issues_event_min_datetime | 2016-12-07T10:30:28.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-02-08T21:28:33.000Z |
| max_forks_repo_path | `test/programytest/extensions/scheduler/test_scheduler.py` |
| max_forks_repo_name | VProgramMist/modified-program-y |
| max_forks_repo_head_hexsha | `f32efcafafd773683b3fe30054d5485fe9002b7d` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 159 |
| max_forks_repo_forks_event_min_datetime | 2016-11-28T18:59:30.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-20T18:02:44.000Z |

content:

```python
import unittest
from programy.extensions.scheduler.scheduler import SchedulerExtension
from programytest.client import TestClient
class SchedulerExtensionClient(TestClient):
def __init__(self, mock_scheduler=None):
self._mock_scheduler = mock_scheduler
TestClient.__init__(self)
def load_configuration(self, arguments):
super(SchedulerExtensionClient, self).load_configuration(arguments)
def load_scheduler(self):
if self._mock_scheduler is not None:
self._scheduler = self._mock_scheduler
else:
super(SchedulerExtensionClient, self).load_scheduler()
class MockJob:
def __init__(self, id, userid):
self.args = [id, userid]
@property
def id(self):
return self.args[0]
class MockScheduler:
def __init__(self):
self._jobs = ()
def add_jobs(self, jobs):
self._jobs = jobs
def list_jobs(self):
return self._jobs
def pause_job (self, id):
pass
def resume_job (self, id):
pass
def stop_job (self, id):
pass
def schedule_every_n_seconds(self, userid, clientid, action, text, seconds):
pass
def schedule_every_n_minutes(self, userid, clientid, action, text, minutes):
pass
def schedule_every_n_hours(self, userid, clientid, action, text, hours):
pass
def schedule_every_n_days(self, userid, clientid, action, text, days):
pass
def schedule_every_n_weeks(self, userid, clientid, action, text, weeks):
pass
def schedule_every_n(self, userid, clientid, action, text, weeks=0, days=0, hours=0, minutes=0, seconds=0):
pass
def schedule_in_n_weeks(self, userid, clientid, action, text, weeks):
pass
def schedule_in_n_days(self, userid, clientid, action, text, days):
pass
def schedule_in_n_hours(self, userid, clientid, action, text, hours):
pass
def schedule_in_n_minutes(self, userid, clientid, action, text, minutes):
pass
def schedule_in_n_seconds(self, userid, clientid, action, text, seconds):
pass
class SchedulerExtensionTests(unittest.TestCase):
# SCHEDULE IN|EVERY X SECS|MINS|HOURS|DAYS|WEEKS TEXT|SRAI TEXT ...........
# PAUSE ALL|JOBID
# RESUME ALL|JOBID
# STOP ALL|JOBID
# LIST
def test_schedule_invalid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
self.assertEquals("ERR", extension.execute(client_context, "OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE OTHER"))
def test_schedule_in_invalid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 MINUTES OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 MINUTES TEXT"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 MINUTES SRAI"))
def test_schedule_every_invalid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVERY"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 MINUTES OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 MINUTES TEXT"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 MINUTES SRAI"))
# IN XXXX
def test_schedule_in_n_seconds(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 SECONDS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_minutes(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 MINUTES TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_hours(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 HOURS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_days(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 DAYS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_weeks(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 WEEKS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
# EVERY XXX
def test_schedule_every_n_seconds(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 SECONDS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_minutes(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 MINUTES TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_hours(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 HOURS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_days(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 DAYS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_weeks(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 WEEKS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
# Other commands
def test_pause_all(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE ALL")
self.assertEquals("OK", response)
def test_pause_all_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE ALL")
self.assertEquals("ERR", response)
def test_pause_job(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 1")
self.assertEquals("OK", response)
def test_pause_job_diff_id(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 2")
self.assertEquals("ERR", response)
def test_pause_job_no_userid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 1")
self.assertEquals("ERR", response)
def test_pause_job_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 1")
self.assertEquals("ERR", response)
def test_resume_all(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME ALL")
self.assertEquals("OK", response)
def test_resume_all_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME ALL")
self.assertEquals("ERR", response)
def test_resume_job(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 1")
self.assertEquals("OK", response)
def test_resume_job_diff_id(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 2")
self.assertEquals("ERR", response)
def test_resume_job_no_userid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 1")
self.assertEquals("ERR", response)
def test_resume_job_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 1")
self.assertEquals("ERR", response)
def test_stop_all(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP ALL")
self.assertEquals("OK", response)
def test_stop_all_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP ALL")
self.assertEquals("ERR", response)
def test_stop_job(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 1")
self.assertEquals("OK", response)
def test_stop_job_diff_id(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 2")
self.assertEquals("ERR", response)
def test_stop_job_no_userid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 1")
self.assertEquals("ERR", response)
def test_stop_job_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 1")
self.assertEquals("ERR", response)
def test_list(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE LIST")
self.assertEquals("OK <olist><item>1</item></olist>", response)
def test_list_mulit_userids(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid"), 2: MockJob(2, "testid2"), 3: MockJob(3, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE LIST")
self.assertEquals("OK <olist><item>1</item><item>3</item></olist>", response)
def test_list_no_userid_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE LIST")
self.assertEquals("ERR", response)
```

avg_line_length: 40.493857; max_line_length: 127; alphanum_fraction: 0.696802

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

1,726 | 16,481 | 6.409618 | 0.052144 | 0.176263 | 0.121938 | 0.117961 | 0.911597 | 0.897135 | 0.888999 | 0.876797 | 0.869656 | 0.835578 | 0 | 0.007617 | 0.203446 | 16,481 | 406 | 128 | 40.593596 | 0.835085 | 0.009647 | 0 | 0.652459 | 0 | 0 | 0.093055 | 0.004414 | 0 | 0 | 0 | 0 | 0.147541 | 1 | 0.183607 | false | 0.045902 | 0.009836 | 0.006557 | 0.213115 | 0

Raw qsc_* columns (41 values in schema order):

0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 0; hits: 6
**Row 3: `util/score_ctc.py` (trinhtuanvubk/KWS-based-ASR)**

| field | value |
|---|---|
| hexsha | `9a8e7b31fad144ec998819e21cb8e9f6c82d9a34` |
| size | 2,449 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `util/score_ctc.py` |
| max_stars_repo_name | trinhtuanvubk/KWS-based-ASR |
| max_stars_repo_head_hexsha | `56ca095a903637ba92527fa3230bccd3357afa91` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2022-03-16T07:01:00.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-16T07:01:00.000Z |
| max_issues_repo_path | `util/score_ctc.py` |
| max_issues_repo_name | trinhtuanvubk/KWS-based-ASR |
| max_issues_repo_head_hexsha | `56ca095a903637ba92527fa3230bccd3357afa91` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | `util/score_ctc.py` |
| max_forks_repo_name | trinhtuanvubk/KWS-based-ASR |
| max_forks_repo_head_hexsha | `56ca095a903637ba92527fa3230bccd3357afa91` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
# import nemo
import nemo.collections.asr as nemo_asr
import torch
import numpy as np
import torch.tensor
# import math
import util
def score_stream(data,matrix_learned_phoneme,matrix_w):
model_path = "./lightning_logs/version_7/checkpoints/epoch=2-step=21404.ckpt"
asr_model = nemo_asr.models.EncDecCTCModel.load_from_checkpoint(checkpoint_path = model_path)
# calculate score
# files = [path]
logprobs = []
# for fname, prob in zip(files, asr_model.transcribe(paths2audio_files = files , logprobs=1)) :
# tensor_probs = (torch.exp(probs).numpy()).tolist()
# tensor_probs = prob
# print(tensor_probs)
tensor_probs = asr_model.transcribe(data,logprobs=1)
tensor_probs = np.exp(tensor_probs.numpy())
# print(tensor_probs)
# print(np.max(tensor_probs,axis=1))
# print(type(tensor_probs[0]))
# tensor_probs = [np.exp(i) for i in tensor_probs]
for phoneme in matrix_learned_phoneme:
probs = util.CTCforward(learned_phoneme = phoneme,matrix = tensor_probs)
# print(probs)
logprobs.append(np.log(probs))
# print(type(logprobs[0]))
# score
# score = sum([logprobs[i]*matrix_w[i]] for i in range(len(matrix_w)))
score = sum([i*j for i,j in zip(logprobs,matrix_w)])
return score
def score_ctc(path, matrix_learned_phoneme, matrix_w) :
model_path = "./lightning_logs/version_7/checkpoints/epoch=2-step=21404.ckpt"
asr_model = nemo_asr.models.EncDecCTCModel.load_from_checkpoint(checkpoint_path = model_path)
# calculate score
files = [path]
logprobs = []
for fname, prob in zip(files, asr_model.transcribe(paths2audio_files = files , logprobs=1)) :
# tensor_probs = (torch.exp(probs).numpy()).tolist()
tensor_probs = prob
# print(tensor_probs)
tensor_probs = np.exp(tensor_probs.numpy())
# print(tensor_probs)
# print(np.max(tensor_probs,axis=1))
# print(type(tensor_probs[0]))
# tensor_probs = [np.exp(i) for i in tensor_probs]
for phoneme in matrix_learned_phoneme:
probs = util.CTCforward(learned_phoneme = phoneme,matrix = tensor_probs)
# print(probs)
logprobs.append(np.log(probs))
# print(type(logprobs[0]))
# score
# score = sum([logprobs[i]*matrix_w[i]] for i in range(len(matrix_w)))
score = sum([i*j for i,j in zip(logprobs,matrix_w)])
return score
```

avg_line_length: 35.492754; max_line_length: 99; alphanum_fraction: 0.670478

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

336 | 2,449 | 4.6875 | 0.193452 | 0.160635 | 0.050794 | 0.040635 | 0.892063 | 0.892063 | 0.892063 | 0.892063 | 0.892063 | 0.892063 | 0 | 0.0129 | 0.208657 | 2,449 | 68 | 100 | 36.014706 | 0.799794 | 0.336056 | 0 | 0.62069 | 0 | 0 | 0.077597 | 0.077597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.172414 | 0 | 0.310345 | 0

Raw qsc_* columns (41 values in schema order):

0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 0; hits: 6
**Row 4: `autograd_cupy/sparse/sparse_jvps.py` (ericmjl/autograd-cupy)**

| field | value |
|---|---|
| hexsha | `9aa3d99b8bf882af4f0e3614935b5995fa917b51` |
| size | 88 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `autograd_cupy/sparse/sparse_jvps.py` |
| max_stars_repo_name | ericmjl/autograd-cupy |
| max_stars_repo_head_hexsha | `493a90cabae42f9e0fdbea77cef758aff659604f` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2018-08-03T00:11:17.000Z |
| max_stars_repo_stars_event_max_datetime | 2018-12-27T17:47:54.000Z |
| max_issues_repo_path | `autograd_cupy/sparse/sparse_jvps.py` |
| max_issues_repo_name | ericmjl/autograd-cupy |
| max_issues_repo_head_hexsha | `493a90cabae42f9e0fdbea77cef758aff659604f` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | `autograd_cupy/sparse/sparse_jvps.py` |
| max_forks_repo_name | ericmjl/autograd-cupy |
| max_forks_repo_head_hexsha | `493a90cabae42f9e0fdbea77cef758aff659604f` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from autograd.extend import def_linear
from .sparse_wrapper import dot
def_linear(dot)
```

avg_line_length: 17.6; max_line_length: 38; alphanum_fraction: 0.840909

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

14 | 88 | 5.071429 | 0.642857 | 0.253521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.113636 | 88 | 4 | 39 | 22 | 0.910256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0

Raw qsc_* columns (41 values in schema order):

1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0

effective: 0; hits: 6
**Row 5: `scpl/parser/__init__.py` (jesopo/scpl)**

| field | value |
|---|---|
| hexsha | `9ab9435ec1f02262afd4f4bb84c025bded364f6d` |
| size | 75 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `scpl/parser/__init__.py` |
| max_stars_repo_name | jesopo/scpl |
| max_stars_repo_head_hexsha | `1fa5acfb468ab212276781fa1760bb5eda438c23` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | `scpl/parser/__init__.py` |
| max_issues_repo_name | jesopo/scpl |
| max_issues_repo_head_hexsha | `1fa5acfb468ab212276781fa1760bb5eda438c23` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 2 |
| max_issues_repo_issues_event_min_datetime | 2021-11-15T11:12:14.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-11-15T17:35:27.000Z |
| max_forks_repo_path | `scpl/parser/__init__.py` |
| max_forks_repo_name | jesopo/scpl |
| max_forks_repo_head_hexsha | `1fa5acfb468ab212276781fa1760bb5eda438c23` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from .parser import *
from .operands import *
from .operators import *
```

avg_line_length: 18.75; max_line_length: 24; alphanum_fraction: 0.706667

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

9 | 75 | 5.888889 | 0.555556 | 0.377358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213333 | 75 | 3 | 25 | 25 | 0.898305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0

Raw qsc_* columns (41 values in schema order):

1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0

effective: 0; hits: 6
**Row 6: `TOPSIS_Prakhar_101803126/__init__.py` (PrakharJindal/Topsis-Pypi-Package)**

| field | value |
|---|---|
| hexsha | `9af9ff9ce17f38cb0bb39d7de4c3f27e10cfab06` |
| size | 41 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `TOPSIS_Prakhar_101803126/__init__.py` |
| max_stars_repo_name | PrakharJindal/Topsis-Pypi-Package |
| max_stars_repo_head_hexsha | `43a484d20aae5f4c8052295f432fafb6ba47aed4` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | `TOPSIS_Prakhar_101803126/__init__.py` |
| max_issues_repo_name | PrakharJindal/Topsis-Pypi-Package |
| max_issues_repo_head_hexsha | `43a484d20aae5f4c8052295f432fafb6ba47aed4` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | `TOPSIS_Prakhar_101803126/__init__.py` |
| max_forks_repo_name | PrakharJindal/Topsis-Pypi-Package |
| max_forks_repo_head_hexsha | `43a484d20aae5f4c8052295f432fafb6ba47aed4` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from .topsis import CalculateTopsisScore
```

avg_line_length: 20.5; max_line_length: 40; alphanum_fraction: 0.878049

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

4 | 41 | 9 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 41 | 1 | 41 | 41 | 0.972973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0

Raw qsc_* columns (41 values in schema order):

1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0

effective: 0; hits: 6
**Row 7: `amocrm_asterisk_ng/telephony/impl/instances/asterisk_16/cdr_provider/query_handlers/__init__.py` (iqtek/amocrn_asterisk_ng)**

| field | value |
|---|---|
| hexsha | `b127ef316f1dd9b3d78f019706fc00e7bcca2096` |
| size | 67 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `amocrm_asterisk_ng/telephony/impl/instances/asterisk_16/cdr_provider/query_handlers/__init__.py` |
| max_stars_repo_name | iqtek/amocrn_asterisk_ng |
| max_stars_repo_head_hexsha | `429a8d0823b951c855a49c1d44ab0e05263c54dc` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | `amocrm_asterisk_ng/telephony/impl/instances/asterisk_16/cdr_provider/query_handlers/__init__.py` |
| max_issues_repo_name | iqtek/amocrn_asterisk_ng |
| max_issues_repo_head_hexsha | `429a8d0823b951c855a49c1d44ab0e05263c54dc` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | `amocrm_asterisk_ng/telephony/impl/instances/asterisk_16/cdr_provider/query_handlers/__init__.py` |
| max_forks_repo_name | iqtek/amocrn_asterisk_ng |
| max_forks_repo_head_hexsha | `429a8d0823b951c855a49c1d44ab0e05263c54dc` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from .GetRecordFileUniqueIdQuery import GetRecordFileUniqueIdQuery
```

avg_line_length: 33.5; max_line_length: 66; alphanum_fraction: 0.925373

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

4 | 67 | 15.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059701 | 67 | 1 | 67 | 67 | 0.984127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0

Raw qsc_* columns (41 values in schema order):

1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0

effective: 0; hits: 6
**Row 8: `reqherd/webservice/crud/__init__.py` (zthurman/reqherd)**

| field | value |
|---|---|
| hexsha | `b180c4c0a84ab038cab49981ad98faeffc5b12cb` |
| size | 106 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `reqherd/webservice/crud/__init__.py` |
| max_stars_repo_name | zthurman/reqherd |
| max_stars_repo_head_hexsha | `6b35c4f22d4e28c363f82a5f3331657f8244a589` |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | `reqherd/webservice/crud/__init__.py` |
| max_issues_repo_name | zthurman/reqherd |
| max_issues_repo_head_hexsha | `6b35c4f22d4e28c363f82a5f3331657f8244a589` |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | `reqherd/webservice/crud/__init__.py` |
| max_forks_repo_name | zthurman/reqherd |
| max_forks_repo_head_hexsha | `6b35c4f22d4e28c363f82a5f3331657f8244a589` |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from ..crud.sysreqs import sysreq
from ..crud.softreqs import softreq
from ..crud.hardreqs import hardreq
```

avg_line_length: 26.5; max_line_length: 35; alphanum_fraction: 0.801887

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

15 | 106 | 5.666667 | 0.6 | 0.282353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.113208 | 106 | 3 | 36 | 35.333333 | 0.904255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0

Raw qsc_* columns (41 values in schema order):

1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0

effective: 0; hits: 6
**Row 9: `Zhangjiashan_vmd/projects/generate_samples.py` (UnIcOrn7618/MonthlyRunoffForecastByAutoReg)**

| field | value |
|---|---|
| hexsha | `b184e119b4b603b4d9b7b4ce01a81fb46c2c80ea` |
| size | 2,534 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `Zhangjiashan_vmd/projects/generate_samples.py` |
| max_stars_repo_name | UnIcOrn7618/MonthlyRunoffForecastByAutoReg |
| max_stars_repo_head_hexsha | `2d66c628141f001e4ffb3dc3b7520a0f0f0ff239` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 2 |
| max_stars_repo_stars_event_min_datetime | 2020-09-24T13:31:06.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-11-11T09:08:16.000Z |
| max_issues_repo_path | `Zhangjiashan_vmd/projects/generate_samples.py` |
| max_issues_repo_name | UnIcOrn7618/MonthlyRunoffForecastByAutoReg |
| max_issues_repo_head_hexsha | `2d66c628141f001e4ffb3dc3b7520a0f0f0ff239` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | `Zhangjiashan_vmd/projects/generate_samples.py` |
| max_forks_repo_name | UnIcOrn7618/MonthlyRunoffForecastByAutoReg |
| max_forks_repo_head_hexsha | `2d66c628141f001e4ffb3dc3b7520a0f0f0ff239` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2020-12-16T07:29:32.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-12-16T07:29:32.000Z |

content:

```python
import os
root_path = os.path.dirname(os.path.abspath("__file__"))
from variables import variables
import sys
sys.path.append(root_path)
from tools.samples_generator import gen_one_step_forecast_samples_triandev_test
from tools.samples_generator import gen_multi_step_forecast_samples
from tools.samples_generator import gen_one_step_forecast_samples
gen_one_step_forecast_samples_triandev_test(
station="Zhangjiashan",
decomposer="vmd",
lags_dict = variables['lags_dict'],
input_columns=['IMF1','IMF2','IMF3','IMF4','IMF5','IMF6','IMF7',],
output_column=['ORIG'],
start=673,
stop=792,
test_len=120,
)
for lead_time in [1,3,5,7,9]:
gen_one_step_forecast_samples(
station = "Zhangjiashan",
decomposer="vmd",
lags_dict = variables['lags_dict'],
input_columns=['IMF1','IMF2','IMF3','IMF4','IMF5','IMF6','IMF7',],
output_column=['ORIG'],
start=553,
stop=792,
test_len=120,
mode = 'PACF',
lead_time =lead_time,
)
for lead_time in [3,5,7,9]:
gen_one_step_forecast_samples(
station = "Zhangjiashan",
decomposer="vmd",
lags_dict = variables['lags_dict'],
input_columns=['IMF1','IMF2','IMF3','IMF4','IMF5','IMF6','IMF7',],
output_column=['ORIG'],
start=553,
stop=792,
test_len=120,
mode = 'Pearson',
lead_time =lead_time,
)
gen_multi_step_forecast_samples(
station='Zhangjiashan',
decomposer="vmd",
lags_dict = variables['lags_dict'],
columns=['IMF1','IMF2','IMF3','IMF4','IMF5','IMF6','IMF7',],
start=553,
stop=792,
test_len=120,
)
gen_one_step_forecast_samples(
station = "Zhangjiashan",
decomposer="vmd",
lags_dict = variables['lags_dict'],
input_columns=['IMF1','IMF2','IMF3','IMF4','IMF5','IMF6','IMF7',],
output_column=['ORIG'],
start=553,
stop=792,
test_len=120,
mode = 'PACF',
lead_time =1,
n_components='mle',
)
num_in_one = sum(variables['lags_dict'].values())
for n_components in range(num_in_one-16,num_in_one+1):
gen_one_step_forecast_samples(
station = "Zhangjiashan",
decomposer="vmd",
lags_dict = variables['lags_dict'],
input_columns=['IMF1','IMF2','IMF3','IMF4','IMF5','IMF6','IMF7',],
output_column=['ORIG'],
start=553,
stop=792,
test_len=120,
mode = 'PACF',
lead_time =1,
n_components=n_components,
)
```

avg_line_length: 27.543478; max_line_length: 79; alphanum_fraction: 0.624704

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

317 | 2,534 | 4.690852 | 0.208202 | 0.069939 | 0.114997 | 0.084734 | 0.829859 | 0.796907 | 0.774042 | 0.724277 | 0.70074 | 0.70074 | 0 | 0.055781 | 0.221784 | 2,534 | 91 | 80 | 27.846154 | 0.698276 | 0 | 0 | 0.679012 | 0 | 0 | 0.146409 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0

Raw qsc_* columns (41 values in schema order):

0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0

effective: 0; hits: 6
**Row 10: `vk/types/events/__init__.py` (Inzilkin/vk.py)**

| field | value |
|---|---|
| hexsha | `49351ddfde6e63ce6aa8591deb6001db57407d0c` |
| size | 24 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `vk/types/events/__init__.py` |
| max_stars_repo_name | Inzilkin/vk.py |
| max_stars_repo_head_hexsha | `969f01e666c877c1761c3629a100768f93de27eb` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 24 |
| max_stars_repo_stars_event_min_datetime | 2019-09-13T15:30:09.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-09T06:35:59.000Z |
| max_issues_repo_path | `vk/types/events/__init__.py` |
| max_issues_repo_name | Inzilkin/vk.py |
| max_issues_repo_head_hexsha | `969f01e666c877c1761c3629a100768f93de27eb` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | `vk/types/events/__init__.py` |
| max_forks_repo_name | Inzilkin/vk.py |
| max_forks_repo_head_hexsha | `969f01e666c877c1761c3629a100768f93de27eb` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 12 |
| max_forks_repo_forks_event_min_datetime | 2019-09-13T15:30:31.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-01T10:13:32.000Z |

content:

```python
from . import community
```

avg_line_length: 12; max_line_length: 23; alphanum_fraction: 0.791667

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

3 | 24 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0

Raw qsc_* columns (41 values in schema order):

1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0

effective: 0; hits: 6
**Row 11: `src/textdata.py` (little-quokka/py-quokka-block)**

| field | value |
|---|---|
| hexsha | `49734e2713a3c200dc972855cc36eeeba9ed7369` |
| size | 99 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `src/textdata.py` |
| max_stars_repo_name | little-quokka/py-quokka-block |
| max_stars_repo_head_hexsha | `d6593087c1f027af80c8968ac113c1ccb2cf7f55` |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | `src/textdata.py` |
| max_issues_repo_name | little-quokka/py-quokka-block |
| max_issues_repo_head_hexsha | `d6593087c1f027af80c8968ac113c1ccb2cf7f55` |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 8 |
| max_issues_repo_issues_event_min_datetime | 2018-01-03T01:27:06.000Z |
| max_issues_repo_issues_event_max_datetime | 2018-01-03T01:32:33.000Z |
| max_forks_repo_path | `src/textdata.py` |
| max_forks_repo_name | little-quokka/py-quokka-block |
| max_forks_repo_head_hexsha | `d6593087c1f027af80c8968ac113c1ccb2cf7f55` |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from abstractdatapackage import AbstractDataPackage
class TextData(AbstractDataPackage):
pass
```

avg_line_length: 19.8; max_line_length: 51; alphanum_fraction: 0.848485

Quality-signal columns (qsc_*_quality_signal, 41 values in schema order):

8 | 99 | 10.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 99 | 5 | 52 | 19.8 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0

Raw qsc_* columns (41 values in schema order):

1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0

effective: 0; hits: 6
**Row 12: `migrations/versions/9ff331ab950b_v0_1_0.py` (BoostryJP/ibet-Prime)**

| field | value |
|---|---|
| hexsha | `497863b677cf4c4a60dc3b9419cf1a3b4b6583bc` |
| size | 9,835 |
| ext | py |
| lang | Python |
| max_stars_repo_path | `migrations/versions/9ff331ab950b_v0_1_0.py` |
| max_stars_repo_name | BoostryJP/ibet-Prime |
| max_stars_repo_head_hexsha | `924e7f8da4f8feea0a572e8b5532e09bcdf2dc99` |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 2 |
| max_stars_repo_stars_event_min_datetime | 2021-08-19T12:35:25.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-02-16T04:13:38.000Z |
| max_issues_repo_path | `migrations/versions/9ff331ab950b_v0_1_0.py` |
| max_issues_repo_name | BoostryJP/ibet-Prime |
| max_issues_repo_head_hexsha | `924e7f8da4f8feea0a572e8b5532e09bcdf2dc99` |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 46 |
| max_issues_repo_issues_event_min_datetime | 2021-09-02T03:22:05.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-31T09:20:00.000Z |
| max_forks_repo_path | `migrations/versions/9ff331ab950b_v0_1_0.py` |
| max_forks_repo_name | BoostryJP/ibet-Prime |
| max_forks_repo_head_hexsha | `924e7f8da4f8feea0a572e8b5532e09bcdf2dc99` |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2021-11-17T23:18:27.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-11-17T23:18:27.000Z |

content:

```python
"""v0.1.0
Revision ID: 9ff331ab950b
Revises:
Create Date: 2021-03-19 21:03:52.102757
"""
from alembic import op
import sqlalchemy as sa
from app.database import get_db_schema
# revision identifiers, used by Alembic.
revision = '9ff331ab950b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('account',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('issuer_address', sa.String(length=42), nullable=False),
sa.Column('keyfile', sa.JSON(), nullable=True),
sa.Column('eoa_password', sa.String(length=2000), nullable=True),
sa.Column('rsa_private_key', sa.String(length=8000), nullable=True),
sa.Column('rsa_public_key', sa.String(length=2000), nullable=True),
sa.Column('rsa_passphrase', sa.String(length=2000), nullable=True),
sa.Column('rsa_status', sa.Integer(), nullable=True),
sa.Column('is_deleted', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('issuer_address')
, schema=get_db_schema())
op.create_table('account_rsa_key_temporary',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('issuer_address', sa.String(length=42), nullable=False),
sa.Column('rsa_private_key', sa.String(length=8000), nullable=True),
sa.Column('rsa_public_key', sa.String(length=2000), nullable=True),
sa.Column('rsa_passphrase', sa.String(length=2000), nullable=True),
sa.PrimaryKeyConstraint('issuer_address')
, schema=get_db_schema())
op.create_table('bulk_transfer',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('issuer_address', sa.String(length=42), nullable=False),
sa.Column('upload_id', sa.String(length=36), nullable=True),
sa.Column('token_address', sa.String(length=42), nullable=False),
sa.Column('token_type', sa.String(length=40), nullable=False),
sa.Column('from_address', sa.String(length=42), nullable=False),
sa.Column('to_address', sa.String(length=42), nullable=False),
sa.Column('amount', sa.Integer(), nullable=False),
sa.Column('status', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
, schema=get_db_schema())
op.create_index(op.f('ix_bulk_transfer_issuer_address'), 'bulk_transfer', ['issuer_address'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_bulk_transfer_status'), 'bulk_transfer', ['status'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_bulk_transfer_upload_id'), 'bulk_transfer', ['upload_id'], unique=False, schema=get_db_schema())
op.create_table('bulk_transfer_upload',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('upload_id', sa.String(length=36), nullable=False),
sa.Column('issuer_address', sa.String(length=42), nullable=False),
sa.Column('token_type', sa.String(length=40), nullable=False),
sa.Column('status', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('upload_id')
, schema=get_db_schema())
op.create_index(op.f('ix_bulk_transfer_upload_issuer_address'), 'bulk_transfer_upload', ['issuer_address'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_bulk_transfer_upload_status'), 'bulk_transfer_upload', ['status'], unique=False, schema=get_db_schema())
op.create_table('idx_personal_info',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column('account_address', sa.String(length=42), nullable=True),
sa.Column('issuer_address', sa.String(length=42), nullable=True),
sa.Column('personal_info', sa.JSON(), nullable=False),
sa.PrimaryKeyConstraint('id')
, schema=get_db_schema())
op.create_index(op.f('ix_idx_personal_info_account_address'), 'idx_personal_info', ['account_address'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_idx_personal_info_issuer_address'), 'idx_personal_info', ['issuer_address'], unique=False, schema=get_db_schema())
op.create_table('idx_personal_info_block_number',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column('latest_block_number', sa.BigInteger(), nullable=True),
sa.PrimaryKeyConstraint('id')
, schema=get_db_schema())
op.create_table('idx_position',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column('token_address', sa.String(length=42), nullable=True),
sa.Column('account_address', sa.String(length=42), nullable=True),
sa.Column('balance', sa.BigInteger(), nullable=True),
sa.PrimaryKeyConstraint('id')
, schema=get_db_schema())
op.create_index(op.f('ix_idx_position_account_address'), 'idx_position', ['account_address'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_idx_position_token_address'), 'idx_position', ['token_address'], unique=False, schema=get_db_schema())
op.create_table('idx_transfer',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column('transaction_hash', sa.String(length=66), nullable=True),
sa.Column('token_address', sa.String(length=42), nullable=True),
sa.Column('transfer_from', sa.String(length=42), nullable=True),
sa.Column('transfer_to', sa.String(length=42), nullable=True),
sa.Column('amount', sa.BigInteger(), nullable=True),
sa.Column('block_timestamp', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
, schema=get_db_schema())
op.create_index(op.f('ix_idx_transfer_token_address'), 'idx_transfer', ['token_address'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_idx_transfer_transaction_hash'), 'idx_transfer', ['transaction_hash'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_idx_transfer_transfer_from'), 'idx_transfer', ['transfer_from'], unique=False, schema=get_db_schema())
op.create_index(op.f('ix_idx_transfer_transfer_to'), 'idx_transfer', ['transfer_to'], unique=False, schema=get_db_schema())
op.create_table('token',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('type', sa.String(length=40), nullable=False),
sa.Column('tx_hash', sa.String(length=66), nullable=False),
sa.Column('issuer_address', sa.String(length=42), nullable=True),
sa.Column('token_address', sa.String(length=42), nullable=True),
sa.Column('abi', sa.JSON(), nullable=False),
sa.PrimaryKeyConstraint('id')
, schema=get_db_schema())
op.create_table('tx_management',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('tx_from', sa.String(length=42), nullable=False),
sa.PrimaryKeyConstraint('tx_from')
, schema=get_db_schema())
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tx_management', schema=get_db_schema())
op.drop_table('token', schema=get_db_schema())
op.drop_index(op.f('ix_idx_transfer_transfer_to'), table_name='idx_transfer', schema=get_db_schema())
op.drop_index(op.f('ix_idx_transfer_transfer_from'), table_name='idx_transfer', schema=get_db_schema())
op.drop_index(op.f('ix_idx_transfer_transaction_hash'), table_name='idx_transfer', schema=get_db_schema())
op.drop_index(op.f('ix_idx_transfer_token_address'), table_name='idx_transfer', schema=get_db_schema())
op.drop_table('idx_transfer', schema=get_db_schema())
op.drop_index(op.f('ix_idx_position_token_address'), table_name='idx_position', schema=get_db_schema())
op.drop_index(op.f('ix_idx_position_account_address'), table_name='idx_position', schema=get_db_schema())
op.drop_table('idx_position', schema=get_db_schema())
op.drop_table('idx_personal_info_block_number', schema=get_db_schema())
op.drop_index(op.f('ix_idx_personal_info_issuer_address'), table_name='idx_personal_info', schema=get_db_schema())
op.drop_index(op.f('ix_idx_personal_info_account_address'), table_name='idx_personal_info', schema=get_db_schema())
op.drop_table('idx_personal_info', schema=get_db_schema())
op.drop_index(op.f('ix_bulk_transfer_upload_status'), table_name='bulk_transfer_upload', schema=get_db_schema())
op.drop_index(op.f('ix_bulk_transfer_upload_issuer_address'), table_name='bulk_transfer_upload', schema=get_db_schema())
op.drop_table('bulk_transfer_upload', schema=get_db_schema())
op.drop_index(op.f('ix_bulk_transfer_upload_id'), table_name='bulk_transfer', schema=get_db_schema())
op.drop_index(op.f('ix_bulk_transfer_status'), table_name='bulk_transfer', schema=get_db_schema())
op.drop_index(op.f('ix_bulk_transfer_issuer_address'), table_name='bulk_transfer', schema=get_db_schema())
op.drop_table('bulk_transfer', schema=get_db_schema())
op.drop_table('account_rsa_key_temporary', schema=get_db_schema())
op.drop_table('account', schema=get_db_schema())
# ### end Alembic commands ###
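The migration above threads get_db_schema() through every create/drop call, so the whole revision can target a non-default schema from a single place. A minimal sketch of what that imported helper might look like, assuming it reads the schema name from configuration (this is an illustration, not the project's actual code):

import os

def get_db_schema():
    # Hypothetical helper: return the configured target schema, or None so
    # the tables are created in the database's default schema.
    return os.environ.get("DATABASE_SCHEMA") or None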
[per-file quality-signal columns elided]
772092ec6765810376e1feab85f3837c4d1bcc80 | 1,681 | py | Python
stars  | random-images/WallpaperGenerator/test.py | dominicschaff/random | 14a19b976a09c768ab8844b7cda237c17a92c9ae | ["MIT"] | null | null | null
issues | random-images/WallpaperGenerator/test.py | dominicschaff/random | 14a19b976a09c768ab8844b7cda237c17a92c9ae | ["MIT"] | null | null | null
forks  | random-images/WallpaperGenerator/test.py | dominicschaff/random | 14a19b976a09c768ab8844b7cda237c17a92c9ae | ["MIT"] | null | null | null
from __future__ import division, print_function  # print_function lets the print calls below run on Python 2 and 3
from math import radians as rad, pi, e
from constants import *
import functions
import sys
dr = DrawImage()
dr.create()
dr.plotRadians(functions.butterfly,
start = 0,
end = 10000,
offset = (dr.width/2, dr.height/4),
steps = 0.1,
scale = 20,
colour = (18,182,252),
rotation = 180)
print "Done: Butterfly"
dr.plotRadians(functions.butterfly,
start = 0,
end = 10000,
offset = (dr.width/2, dr.height/4*3),
steps = 0.1,
scale = 20,
colour = (18,182,252),
rotation = 0)
print "Done: Butterfly"
dr.plotRadians(functions.butterfly,
start = 0,
end = 10000,
offset = (dr.width/4, dr.height/2),
steps = 0.1,
scale = 20,
colour = (18,182,252),
rotation = 180)
print "Done: Butterfly"
dr.plotRadians(functions.butterfly,
start = 0,
end = 10000,
offset = (dr.width/4*3, dr.height/2),
steps = 0.1,
scale = 20,
colour = (18,182,252),
rotation = 180)
print "Done: Butterfly"
for i in drange(50,70.5,0.5):
dr.plotRadians(functions.leaf,
start = 0,
end = 10000,
offset = (dr.width/2, dr.height/2+25),
steps = 0.1,
scale = i,
colour = (252,18,252),
rotation = 270)
dr.plotRadians(functions.leaf,
start = 0,
end = 10000,
offset = (dr.width/2, dr.height/2+25),
steps = 0.1,
scale = i,
colour = (252,18,252),
rotation = 90)
print "Done: Leaf:", i
d2 = DrawImage()
d2.open('image1.png')
dr.addOver(d2)
dr.save('image3.png')
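The script depends on DrawImage and drange being re-exported by the star-import from constants; neither is defined in this file. A minimal sketch of a float-step drange with the behavior the leaf loop expects (an assumption, since the real definition is not shown):

def drange(start, stop, step):
    # Hypothetical float-step range: yields start, start+step, ... while the
    # value stays below stop, e.g. drange(50, 70.5, 0.5) -> 50.0, 50.5, ..., 70.0
    value = start
    while value < stop:
        yield value
        value += step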
[per-file quality-signal columns elided]
772d89f1141a86458c1f48806e1b7d511a4d7d59 | 30,179 | py | Python
stars  | python/trezorlib/tests/device_tests/test_msg_signtx_bcash.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | ["MIT"] | null | null | null
issues | python/trezorlib/tests/device_tests/test_msg_signtx_bcash.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | ["MIT"] | 1 | 2019-02-08T00:22:42.000Z | 2019-02-13T09:41:54.000Z
forks  | python/trezorlib/tests/device_tests/test_msg_signtx_bcash.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | ["MIT"] | 2 | 2019-02-07T23:57:09.000Z | 2020-10-21T07:07:27.000Z
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
from trezorlib import btc, messages as proto
from trezorlib.tools import H_, CallException, parse_path
from ..support.ckd_public import deserialize
from ..support.tx_cache import tx_cache
from .common import TrezorTest
TX_API = tx_cache("Bcash")
class TestMsgSigntxBch(TrezorTest):
def test_send_bch_change(self):
self.setup_mnemonic_allallall()
inp1 = proto.TxInputType(
address_n=parse_path("44'/145'/0'/0/0"),
# bitcoincash:qr08q88p9etk89wgv05nwlrkm4l0urz4cyl36hh9sv
amount=1995344,
prev_hash=bytes.fromhex(
"bc37c28dfb467d2ecb50261387bf752a3977d7e5337915071bb4151e6b711a78"
),
prev_index=0,
script_type=proto.InputScriptType.SPENDADDRESS,
)
out1 = proto.TxOutputType(
address_n=parse_path("44'/145'/0'/1/0"),
amount=1896050,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address="bitcoincash:qr23ajjfd9wd73l87j642puf8cad20lfmqdgwvpat4",
amount=73452,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(request_type=proto.RequestType.TXFINISHED),
]
)
_, serialized_tx = btc.sign_tx(
self.client, "Bcash", [inp1], [out1, out2], prev_txes=TX_API
)
assert (
serialized_tx.hex()
== "0100000001781a716b1e15b41b07157933e5d777392a75bf87132650cb2e7d46fb8dc237bc000000006a473044022061aee4f17abe044d5df8c52c9ffd3b84e5a29743517e488b20ecf1ae0b3e4d3a02206bb84c55e407f3b684ff8d9bea0a3409cfd865795a19d10b3d3c31f12795c34a412103a020b36130021a0f037c1d1a02042e325c0cb666d6478c1afdcd9d913b9ef080ffffffff0272ee1c00000000001976a914b1401fce7e8bf123c88a0467e0ed11e3b9fbef5488acec1e0100000000001976a914d51eca49695cdf47e7f4b55507893e3ad53fe9d888ac00000000"
)
def test_send_bch_nochange(self):
self.setup_mnemonic_allallall()
inp1 = proto.TxInputType(
address_n=parse_path("44'/145'/0'/1/0"),
# bitcoincash:qzc5q87w069lzg7g3gzx0c8dz83mn7l02scej5aluw
amount=1896050,
prev_hash=bytes.fromhex(
"502e8577b237b0152843a416f8f1ab0c63321b1be7a8cad7bf5c5c216fcf062c"
),
prev_index=0,
script_type=proto.InputScriptType.SPENDADDRESS,
)
inp2 = proto.TxInputType(
address_n=parse_path("44'/145'/0'/0/1"),
# bitcoincash:qr23ajjfd9wd73l87j642puf8cad20lfmqdgwvpat4
amount=73452,
prev_hash=bytes.fromhex(
"502e8577b237b0152843a416f8f1ab0c63321b1be7a8cad7bf5c5c216fcf062c"
),
prev_index=1,
script_type=proto.InputScriptType.SPENDADDRESS,
)
out1 = proto.TxOutputType(
address="bitcoincash:qq6wnnkrz7ykaqvxrx4hmjvayvzjzml54uyk76arx4",
amount=1934960,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(request_type=proto.RequestType.TXFINISHED),
]
)
_, serialized_tx = btc.sign_tx(
self.client, "Bcash", [inp1, inp2], [out1], prev_txes=TX_API
)
assert (
serialized_tx.hex()
== "01000000022c06cf6f215c5cbfd7caa8e71b1b32630cabf1f816a4432815b037b277852e50000000006a47304402207a2a955f1cb3dc5f03f2c82934f55654882af4e852e5159639f6349e9386ec4002205fb8419dce4e648eae8f67bc4e369adfb130a87d2ea2d668f8144213b12bb457412103174c61e9c5362507e8061e28d2c0ce3d4df4e73f3535ae0b12f37809e0f92d2dffffffff2c06cf6f215c5cbfd7caa8e71b1b32630cabf1f816a4432815b037b277852e50010000006a473044022062151cf960b71823bbe68c7ed2c2a93ad1b9706a30255fddb02fcbe056d8c26102207bad1f0872bc5f0cfaf22e45c925c35d6c1466e303163b75cb7688038f1a5541412102595caf9aeb6ffdd0e82b150739a83297358b9a77564de382671056ad9e5b8c58ffffffff0170861d00000000001976a91434e9cec317896e818619ab7dc99d2305216ff4af88ac00000000"
)
def test_send_bch_oldaddr(self):
self.setup_mnemonic_allallall()
inp1 = proto.TxInputType(
address_n=parse_path("44'/145'/0'/1/0"),
# bitcoincash:qzc5q87w069lzg7g3gzx0c8dz83mn7l02scej5aluw
amount=1896050,
prev_hash=bytes.fromhex(
"502e8577b237b0152843a416f8f1ab0c63321b1be7a8cad7bf5c5c216fcf062c"
),
prev_index=0,
script_type=proto.InputScriptType.SPENDADDRESS,
)
inp2 = proto.TxInputType(
address_n=parse_path("44'/145'/0'/0/1"),
# bitcoincash:qr23ajjfd9wd73l87j642puf8cad20lfmqdgwvpat4
amount=73452,
prev_hash=bytes.fromhex(
"502e8577b237b0152843a416f8f1ab0c63321b1be7a8cad7bf5c5c216fcf062c"
),
prev_index=1,
script_type=proto.InputScriptType.SPENDADDRESS,
)
out1 = proto.TxOutputType(
address="15pnEDZJo3ycPUamqP3tEDnEju1oW5fBCz",
amount=1934960,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(request_type=proto.RequestType.TXFINISHED),
]
)
_, serialized_tx = btc.sign_tx(
self.client, "Bcash", [inp1, inp2], [out1], prev_txes=TX_API
)
assert (
serialized_tx.hex()
== "01000000022c06cf6f215c5cbfd7caa8e71b1b32630cabf1f816a4432815b037b277852e50000000006a47304402207a2a955f1cb3dc5f03f2c82934f55654882af4e852e5159639f6349e9386ec4002205fb8419dce4e648eae8f67bc4e369adfb130a87d2ea2d668f8144213b12bb457412103174c61e9c5362507e8061e28d2c0ce3d4df4e73f3535ae0b12f37809e0f92d2dffffffff2c06cf6f215c5cbfd7caa8e71b1b32630cabf1f816a4432815b037b277852e50010000006a473044022062151cf960b71823bbe68c7ed2c2a93ad1b9706a30255fddb02fcbe056d8c26102207bad1f0872bc5f0cfaf22e45c925c35d6c1466e303163b75cb7688038f1a5541412102595caf9aeb6ffdd0e82b150739a83297358b9a77564de382671056ad9e5b8c58ffffffff0170861d00000000001976a91434e9cec317896e818619ab7dc99d2305216ff4af88ac00000000"
)
def test_attack_amount(self):
self.setup_mnemonic_allallall()
inp1 = proto.TxInputType(
address_n=parse_path("44'/145'/0'/1/0"),
# bitcoincash:qzc5q87w069lzg7g3gzx0c8dz83mn7l02scej5aluw
amount=300,
prev_hash=bytes.fromhex(
"502e8577b237b0152843a416f8f1ab0c63321b1be7a8cad7bf5c5c216fcf062c"
),
prev_index=0,
script_type=proto.InputScriptType.SPENDADDRESS,
)
inp2 = proto.TxInputType(
address_n=parse_path("44'/145'/0'/0/1"),
# bitcoincash:qr23ajjfd9wd73l87j642puf8cad20lfmqdgwvpat4
amount=70,
prev_hash=bytes.fromhex(
"502e8577b237b0152843a416f8f1ab0c63321b1be7a8cad7bf5c5c216fcf062c"
),
prev_index=1,
script_type=proto.InputScriptType.SPENDADDRESS,
)
out1 = proto.TxOutputType(
address="bitcoincash:qq6wnnkrz7ykaqvxrx4hmjvayvzjzml54uyk76arx4",
amount=200,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
# test if passes without modifications
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(request_type=proto.RequestType.TXFINISHED),
]
)
btc.sign_tx(self.client, "Bcash", [inp1, inp2], [out1], prev_txes=TX_API)
run_attack = True
def attack_processor(msg):
nonlocal run_attack
if run_attack and msg.tx.inputs and msg.tx.inputs[0] == inp1:
# 300 is lowered to 280 at the first run
# the user confirms 280 but the transaction
# is spending 300 => larger fee without the user knowing
msg.tx.inputs[0].amount = 280
run_attack = False
return msg
# now fails
self.client.set_filter(proto.TxAck, attack_processor)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.Failure(),
]
)
with pytest.raises(CallException) as exc:
btc.sign_tx(
self.client, "Bcash", [inp1, inp2], [out1], prev_txes=TX_API
)
assert exc.value.args[0] in (
proto.FailureType.ProcessError,
proto.FailureType.DataError,
)
assert exc.value.args[1].endswith("Transaction has changed during signing")
def test_attack_change_input(self):
self.setup_mnemonic_allallall()
inp1 = proto.TxInputType(
address_n=parse_path("44'/145'/10'/0/0"),
amount=1995344,
prev_hash=bytes.fromhex(
"bc37c28dfb467d2ecb50261387bf752a3977d7e5337915071bb4151e6b711a78"
),
prev_index=0,
script_type=proto.InputScriptType.SPENDADDRESS,
)
out1 = proto.TxOutputType(
address_n=parse_path("44'/145'/10'/1/0"),
amount=1896050,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address="bitcoincash:qr23ajjfd9wd73l87j642puf8cad20lfmqdgwvpat4",
amount=73452,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
run_attack = False
def attack_processor(msg):
nonlocal run_attack
if msg.tx.inputs and msg.tx.inputs[0] == inp1:
if not run_attack:
run_attack = True
else:
msg.tx.inputs[0].address_n[2] = H_(1)
return msg
self.client.set_filter(proto.TxAck, attack_processor)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.Failure(code=proto.FailureType.ProcessError),
]
)
with pytest.raises(CallException):
btc.sign_tx(
self.client, "Bcash", [inp1], [out1, out2], prev_txes=TX_API
)
def test_send_bch_multisig_wrongchange(self):
self.setup_mnemonic_allallall()
xpubs = []
for n in map(
lambda index: btc.get_public_node(
self.client, parse_path("48'/145'/%d'" % index)
),
range(1, 4),
):
xpubs.append(n.xpub)
def getmultisig(chain, nr, signatures=[b"", b"", b""], xpubs=xpubs):
return proto.MultisigRedeemScriptType(
nodes=[deserialize(xpub) for xpub in xpubs],
address_n=[chain, nr],
signatures=signatures,
m=2,
)
correcthorse = proto.HDNodeType(
depth=1,
fingerprint=0,
child_num=0,
chain_code=bytes.fromhex(
"0000000000000000000000000000000000000000000000000000000000000000"
),
public_key=bytes.fromhex(
"0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71"
),
)
sig = bytes.fromhex(
"304402207274b5a4d15e75f3df7319a375557b0efba9b27bc63f9f183a17da95a6125c94022000efac57629f1522e2d3958430e2ef073b0706cfac06cce492651b79858f09ae"
)
inp1 = proto.TxInputType(
address_n=parse_path("48'/145'/1'/1/0"),
multisig=getmultisig(1, 0, [b"", sig, b""]),
# bitcoincash:pp6kcpkhua7789g2vyj0qfkcux3yvje7euhyhltn0a
amount=24000,
prev_hash=bytes.fromhex(
"f68caf10df12d5b07a34601d88fa6856c6edcbf4d05ebef3486510ae1c293d5f"
),
prev_index=1,
script_type=proto.InputScriptType.SPENDMULTISIG,
)
out1 = proto.TxOutputType(
address_n=parse_path("48'/145'/1'/1/1"),
multisig=proto.MultisigRedeemScriptType(
pubkeys=[
proto.HDNodePathType(node=deserialize(xpubs[0]), address_n=[1, 1]),
proto.HDNodePathType(node=correcthorse, address_n=[]),
proto.HDNodePathType(node=correcthorse, address_n=[]),
],
signatures=[b"", b"", b""],
m=2,
),
script_type=proto.OutputScriptType.PAYTOMULTISIG,
amount=23000,
)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(request_type=proto.RequestType.TXFINISHED),
]
)
(signatures1, serialized_tx) = btc.sign_tx(
self.client, "Bcash", [inp1], [out1], prev_txes=TX_API
)
assert (
signatures1[0].hex()
== "304402201badcdcafef4855ed58621f95935efcbc72068510472140f4ec5e252faa0af93022003310a43488288f70aedee96a5af2643a255268a6858cda9ae3001ea5e3c7557"
)
assert (
serialized_tx.hex()
== "01000000015f3d291cae106548f3be5ed0f4cbedc65668fa881d60347ab0d512df10af8cf601000000fc0047304402201badcdcafef4855ed58621f95935efcbc72068510472140f4ec5e252faa0af93022003310a43488288f70aedee96a5af2643a255268a6858cda9ae3001ea5e3c75574147304402207274b5a4d15e75f3df7319a375557b0efba9b27bc63f9f183a17da95a6125c94022000efac57629f1522e2d3958430e2ef073b0706cfac06cce492651b79858f09ae414c69522102245739b55787a27228a4fe78b3a324366cc645fbaa708cad45da351a334341192102debbdcb0b6970d5ade84a50fdbda1c701cdde5c9925d9b6cd8e05a9a15dbef352102ffe5fa04547b2b0c3cfbc21c08a1ddfb147025fee10274cdcd5c1bdeee88eae253aeffffffff01d85900000000000017a914a23eb2a1ed4003d357770120f5c370e199ee55468700000000"
)
def test_send_bch_multisig_change(self):
self.setup_mnemonic_allallall()
xpubs = []
for n in map(
lambda index: btc.get_public_node(
self.client, parse_path("48'/145'/%d'" % index)
),
range(1, 4),
):
xpubs.append(n.xpub)
def getmultisig(chain, nr, signatures=[b"", b"", b""], xpubs=xpubs):
return proto.MultisigRedeemScriptType(
nodes=[deserialize(xpub) for xpub in xpubs],
address_n=[chain, nr],
signatures=signatures,
m=2,
)
inp1 = proto.TxInputType(
address_n=parse_path("48'/145'/3'/0/0"),
multisig=getmultisig(0, 0),
amount=48490,
prev_hash=bytes.fromhex(
"8b6db9b8ba24235d86b053ea2ccb484fc32b96f89c3c39f98d86f90db16076a0"
),
prev_index=0,
script_type=proto.InputScriptType.SPENDMULTISIG,
)
out1 = proto.TxOutputType(
address="bitcoincash:qqq8gx2j76nw4dfefumxmdwvtf2tpsjznusgsmzex9",
amount=24000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=parse_path("48'/145'/3'/1/0"),
multisig=getmultisig(1, 0),
script_type=proto.OutputScriptType.PAYTOMULTISIG,
amount=24000,
)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(request_type=proto.RequestType.TXFINISHED),
]
)
(signatures1, serialized_tx) = btc.sign_tx(
self.client, "Bcash", [inp1], [out1, out2], prev_txes=TX_API
)
assert (
signatures1[0].hex()
== "3045022100a05f77bb39515c21c43e6c4ba401f39ed5d409dc3cfcd90f9a8345a08cc4bc8202205faf8f3b0775748278495324fdd60f370460452e4995e546450209ec4804a0f3"
)
inp1 = proto.TxInputType(
address_n=parse_path("48'/145'/1'/0/0"),
multisig=getmultisig(0, 0, [b"", b"", signatures1[0]]),
# bitcoincash:pqguz4nqq64jhr5v3kvpq4dsjrkda75hwy86gq0qzw
amount=48490,
prev_hash=bytes.fromhex(
"8b6db9b8ba24235d86b053ea2ccb484fc32b96f89c3c39f98d86f90db16076a0"
),
prev_index=0,
script_type=proto.InputScriptType.SPENDMULTISIG,
)
out2.address_n[2] = H_(1)
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.TxRequest(request_type=proto.RequestType.TXFINISHED),
]
)
(signatures1, serialized_tx) = btc.sign_tx(
self.client, "Bcash", [inp1], [out1, out2], prev_txes=TX_API
)
assert (
signatures1[0].hex()
== "3044022006f239ef1f065a70873ab9d2c81a623a04ec7a37a0ec5299d3c585668f441f49022032b2f9ef13bc61230d14f6d79b9ad1bbebdf47b95e4757e9af1b1dcdf520d3ab"
)
assert (
serialized_tx.hex()
== "0100000001a07660b10df9868df9393c9cf8962bc34f48cb2cea53b0865d2324bab8b96d8b00000000fdfd0000473044022006f239ef1f065a70873ab9d2c81a623a04ec7a37a0ec5299d3c585668f441f49022032b2f9ef13bc61230d14f6d79b9ad1bbebdf47b95e4757e9af1b1dcdf520d3ab41483045022100a05f77bb39515c21c43e6c4ba401f39ed5d409dc3cfcd90f9a8345a08cc4bc8202205faf8f3b0775748278495324fdd60f370460452e4995e546450209ec4804a0f3414c69522102f8ca0d9665af03de32a7c19a167a4f6e97e4e0ed9505f75d11f7a45ab60b1f4d2103263d87cefd687bc15b4ef7801f9f538267b66d46f18e9fccc41d54071cfdd1ce210388568bf42f02298308eb6fa2fa4b446d544600253b4409be27e2c0c1a71c424853aeffffffff02c05d0000000000001976a91400741952f6a6eab5394f366db5cc5a54b0c2429f88acc05d00000000000017a91478574751407449b97f8054be2e40e684ad07d3738700000000"
)
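Throughout these tests, parse_path turns a BIP-32 string such as "44'/145'/0'/0/0" into a list of uint32 derivation components, with the apostrophe marking a hardened component (high bit set); the H_(1) calls above set that same bit. A minimal sketch of the conversion, for illustration only, since trezorlib ships its own implementation:

HARDENED_FLAG = 0x80000000  # high bit marks a hardened BIP-32 component

def parse_path_sketch(path):
    # "44'/145'/0'/0/0" -> [44 | HARDENED_FLAG, 145 | HARDENED_FLAG, 0 | HARDENED_FLAG, 0, 0]
    components = []
    for part in path.split("/"):
        if part in ("m", ""):
            continue
        if part.endswith("'"):
            components.append(int(part[:-1]) | HARDENED_FLAG)
        else:
            components.append(int(part))
    return components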
[per-file quality-signal columns elided]
7755d62a8cddeb6b7c4563e1d0784e854d4481ed | 34 | py | Python
stars  | src/certipy/__init__.py | rlongio/certipy | 901c3aa8f52814108b73ea000d91aec5fa8eca8d | ["MIT"] | null | null | null
issues | src/certipy/__init__.py | rlongio/certipy | 901c3aa8f52814108b73ea000d91aec5fa8eca8d | ["MIT"] | 2 | 2021-01-06T06:47:01.000Z | 2021-06-25T15:47:26.000Z
forks  | src/certipy/__init__.py | rlongio/certipy | 901c3aa8f52814108b73ea000d91aec5fa8eca8d | ["MIT"] | null | null | null
from certipy.descriptors import *
[per-file quality-signal columns elided]
624f5aadc27db776c78bd621727274f49c1aaee6 | 18,611 | py | Python
stars  | tests/asgard/workers/autoscaler/test_autoscaler.py | rockerbacon/asgard-api | 1c1eb19225ace4bbecb06b65b1b9c4ab131eb24a | ["MIT"] | 3 | 2020-01-10T02:16:09.000Z | 2020-02-19T18:42:37.000Z
issues | tests/asgard/workers/autoscaler/test_autoscaler.py | b2wdigital/asgard-api | 5444d81be33bf4af3c9cf5a2185c16ff10357034 | ["MIT"] | 13 | 2020-01-15T18:22:35.000Z | 2021-03-31T19:21:54.000Z
forks  | tests/asgard/workers/autoscaler/test_autoscaler.py | rockerbacon/asgard-api | 1c1eb19225ace4bbecb06b65b1b9c4ab131eb24a | ["MIT"] | 6 | 2020-03-07T09:49:19.000Z | 2021-07-25T03:14:10.000Z
from aioresponses import aioresponses
from asynctest import TestCase
from yarl import URL
from asgard.conf import settings
from asgard.workers.autoscaler.app import scale_all_apps
from asgard.workers.autoscaler.asgard_cloudinterface import (
AsgardInterface as AsgardCloudInterface,
)
from asgard.workers.autoscaler.periodicstatechecker import PeriodicStateChecker
from asgard.workers.autoscaler.simple_decision_component import (
DecisionComponent,
)
class AutoscalerTest(TestCase):
async def test_scale_one_app(self):
cloud_interface = AsgardCloudInterface()
state_checker = PeriodicStateChecker(cloud_interface)
decision_maker = DecisionComponent()
with aioresponses() as rsps:
stats_fixture = {
"stats": {
"type": "ASGARD",
"errors": {},
"cpu_pct": "100",
"ram_pct": "100",
"cpu_thr_pct": "0",
}
}
apps_fixture = {
"apps": [
{
"id": "/test_app1",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.3,
"asgard.autoscale.mem": 0.8,
"asgard.autoscale.ignore": "all",
},
},
{
"id": "/test_app2",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.1,
"asgard.autoscale.mem": 0.1,
"asgard.autoscale.ignore": "",
},
},
]
}
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload=apps_fixture,
)
for app in apps_fixture["apps"]:
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/apps{app['id']}/stats/avg-1min",
status=200,
payload=stats_fixture,
)
rsps.put(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload={"deploymentId": "test", "version": "1.0"},
)
apps_stats = await state_checker.get_scalable_apps_stats()
scaling_decision = decision_maker.decide_scaling_actions(apps_stats)
await cloud_interface.apply_decisions(scaling_decision)
scale_spy = rsps.requests.get(
("put", URL(f"{settings.ASGARD_API_ADDRESS}/v2/apps"))
)
self.assertEqual(1, len(scaling_decision))
self.assertEqual(10, scaling_decision[0].mem)
self.assertEqual("test_app2", scaling_decision[0].id)
self.assertEqual(35, scaling_decision[0].cpu)
self.assertIsNotNone(scale_spy)
async def test_decide_to_scale_all_apps(self):
cloud_interface = AsgardCloudInterface()
state_checker = PeriodicStateChecker(cloud_interface)
decision_maker = DecisionComponent()
with aioresponses() as rsps:
stats_fixture = {
"stats": {
"type": "ASGARD",
"errors": {},
"cpu_pct": "100",
"ram_pct": "100",
"cpu_thr_pct": "0",
}
}
apps_fixture = {
"apps": [
{
"id": "/test_app1",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.3,
"asgard.autoscale.mem": 0.8,
"asgard.autoscale.ignore": "cpu",
},
},
{
"id": "/test_app2",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.1,
"asgard.autoscale.mem": 0.6,
"asgard.autoscale.ignore": "mem",
},
},
]
}
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload=apps_fixture,
)
for app in apps_fixture["apps"]:
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/apps{app['id']}/stats/avg-1min",
status=200,
payload=stats_fixture,
)
rsps.put(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload={"deploymentId": "test", "version": "1.0"},
)
apps_stats = await state_checker.get_scalable_apps_stats()
scaling_decision = decision_maker.decide_scaling_actions(apps_stats)
await cloud_interface.apply_decisions(scaling_decision)
scale_spy = rsps.requests.get(
("put", URL(f"{settings.ASGARD_API_ADDRESS}/v2/apps"))
)
self.assertEqual(len(apps_stats), len(scaling_decision))
self.assertEqual(1.25, scaling_decision[0].mem)
self.assertEqual("test_app1", scaling_decision[0].id)
self.assertEqual(None, scaling_decision[0].cpu)
self.assertEqual("test_app2", scaling_decision[1].id)
self.assertEqual(None, scaling_decision[1].mem)
self.assertEqual(35, scaling_decision[1].cpu)
self.assertIsNotNone(scale_spy)
async def test_decide_to_scale_some_apps(self):
cloud_interface = AsgardCloudInterface()
state_checker = PeriodicStateChecker(cloud_interface)
decision_maker = DecisionComponent()
with aioresponses() as rsps:
stats_fixture = {
"stats": {
"type": "ASGARD",
"errors": {},
"cpu_pct": "100",
"ram_pct": "100",
"cpu_thr_pct": "0",
}
}
apps_fixture = {
"apps": [
{
"id": "/test_app1",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.3,
"asgard.autoscale.mem": 0.8,
"asgard.autoscale.ignore": "all",
},
},
{
"id": "/test_app2",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.1,
"asgard.autoscale.mem": 0.1,
"asgard.autoscale.ignore": "",
},
},
{
"id": "/test_app3",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.5,
"asgard.autoscale.mem": 0.7,
"asgard.autoscale.ignore": "mem",
},
},
]
}
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload=apps_fixture,
)
for app in apps_fixture["apps"]:
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/apps{app['id']}/stats/avg-1min",
status=200,
payload=stats_fixture,
)
rsps.put(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload={"deploymentId": "test", "version": "1.0"},
)
apps_stats = await state_checker.get_scalable_apps_stats()
scaling_decision = decision_maker.decide_scaling_actions(apps_stats)
await cloud_interface.apply_decisions(scaling_decision)
scale_spy = rsps.requests.get(
("put", URL(f"{settings.ASGARD_API_ADDRESS}/v2/apps"))
)
self.assertEqual(2, len(scaling_decision))
self.assertEqual(10, scaling_decision[0].mem)
self.assertEqual("test_app2", scaling_decision[0].id)
self.assertEqual(35, scaling_decision[0].cpu)
self.assertEqual("test_app3", scaling_decision[1].id)
self.assertEqual(None, scaling_decision[1].mem)
self.assertEqual(7, scaling_decision[1].cpu)
self.assertIsNotNone(scale_spy)
async def test_decide_to_scale_no_apps(self):
cloud_interface = AsgardCloudInterface()
state_checker = PeriodicStateChecker(cloud_interface)
decision_maker = DecisionComponent()
with aioresponses() as rsps:
stats_fixture = {
"stats": {
"type": "ASGARD",
"errors": {},
"cpu_pct": "1",
"ram_pct": "1",
"cpu_thr_pct": "0",
}
}
apps_fixture = {
"apps": [
{
"id": "/test_app1",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.3,
"asgard.autoscale.mem": 0.8,
"asgard.autoscale.ignore": "all",
},
},
{
"id": "/test_app2",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.1,
"asgard.autoscale.mem": 0.1,
"asgard.autoscale.ignore": "cpu,mem",
},
},
]
}
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload=apps_fixture,
)
rsps.put(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload={"deploymentId": "test", "version": "1.0"},
)
for app in apps_fixture["apps"]:
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/apps{app['id']}/stats/avg-1min",
status=200,
payload=stats_fixture,
)
apps = await state_checker.get_scalable_apps_stats()
scaling_decision = decision_maker.decide_scaling_actions(apps)
await cloud_interface.apply_decisions(scaling_decision)
scale_spy = rsps.requests.get(
("PUT", URL(f"{settings.ASGARD_API_ADDRESS}/v2/apps"))
)
self.assertEqual(0, len(scaling_decision))
self.assertIsNone(scale_spy)
async def test_does_not_scale_when_difference_less_than_5_percent(self):
cloud_interface = AsgardCloudInterface()
state_checker = PeriodicStateChecker(cloud_interface)
decision_maker = DecisionComponent()
with aioresponses() as rsps:
stats_fixture = {
"stats": {
"type": "ASGARD",
"errors": {},
"cpu_pct": "25.1",
"ram_pct": "84.9",
"cpu_thr_pct": "0",
}
}
apps_fixture = {
"apps": [
{
"id": "/test_app1",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.3,
"asgard.autoscale.mem": 0.8,
},
}
]
}
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload=apps_fixture,
)
rsps.put(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload={"deploymentId": "test", "version": "1.0"},
)
for app in apps_fixture["apps"]:
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/apps{app['id']}/stats/avg-1min",
status=200,
payload=stats_fixture,
)
apps_stats = await state_checker.get_scalable_apps_stats()
scaling_decision = decision_maker.decide_scaling_actions(apps_stats)
await cloud_interface.apply_decisions(scaling_decision)
scale_spy = rsps.requests.get(
("PUT", URL(f"{settings.ASGARD_API_ADDRESS}/v2/apps"))
)
self.assertEqual(0, len(scaling_decision))
self.assertEqual(1, len(apps_stats), "didn't fetch one app")
self.assertEqual(
0, len(scaling_decision), "didn't make scaling decision"
)
self.assertIsNone(scale_spy)
async def test_scales_when_difference_more_than_5_percent(self):
cloud_interface = AsgardCloudInterface()
state_checker = PeriodicStateChecker(cloud_interface)
decision_maker = DecisionComponent()
with aioresponses() as rsps:
stats_fixture = {
"stats": {
"type": "ASGARD",
"errors": {},
"cpu_pct": "24.9",
"ram_pct": "85.1",
"cpu_thr_pct": "0",
}
}
apps_fixture = {
"apps": [
{
"id": "/test_app1",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.3,
"asgard.autoscale.mem": 0.8,
},
}
]
}
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload=apps_fixture,
)
rsps.put(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload={"deploymentId": "test", "version": "1.0"},
)
for app in apps_fixture["apps"]:
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/apps{app['id']}/stats/avg-1min",
status=200,
payload=stats_fixture,
)
apps_stats = await state_checker.get_scalable_apps_stats()
scaling_decision = decision_maker.decide_scaling_actions(apps_stats)
await cloud_interface.apply_decisions(scaling_decision)
scale_spy = rsps.requests.get(
("put", URL(f"{settings.ASGARD_API_ADDRESS}/v2/apps"))
)
self.assertEqual(1, len(apps_stats), "didn't fetch one app")
self.assertEqual(
1, len(scaling_decision), "didn't make scaling decision"
)
self.assertEqual(
"test_app1", scaling_decision[0].id, "made decision for wrong app"
)
self.assertEqual(
2.905, scaling_decision[0].cpu, "scaled cpu to incorrect value"
)
self.assertEqual(
1.06375, scaling_decision[0].mem, "scaled memory to incorrect value"
)
self.assertIsNotNone(scale_spy)
async def test_worker_polling(self):
with aioresponses() as rsps:
stats_fixture = {
"stats": {
"type": "ASGARD",
"errors": {},
"cpu_pct": "100",
"ram_pct": "100",
"cpu_thr_pct": "0",
}
}
apps_fixture = {
"apps": [
{
"id": "/test_app1",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.3,
"asgard.autoscale.mem": 0.8,
"asgard.autoscale.ignore": "all",
},
},
{
"id": "/test_app2",
"cpus": 3.5,
"mem": 1.0,
"labels": {
"asgard.autoscale.cpu": 0.1,
"asgard.autoscale.mem": 0.1,
"asgard.autoscale.ignore": "",
},
},
]
}
body_fixture = [{"id": "test_app2", "mem": 10.0, "cpus": 35.0}]
headers_fixture = {
"Content-Type": "application/json",
"Authorization": f"Token {settings.AUTOSCALER_AUTH_TOKEN}",
}
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload=apps_fixture,
)
for app in apps_fixture["apps"]:
rsps.get(
f"{settings.ASGARD_API_ADDRESS}/apps{app['id']}/stats/avg-1min",
status=200,
payload=stats_fixture,
)
rsps.put(
f"{settings.ASGARD_API_ADDRESS}/v2/apps",
status=200,
payload={"deploymentId": "test", "version": "1.0"},
)
await scale_all_apps(None)
scale_spy = rsps.requests.get(
("put", URL(f"{settings.ASGARD_API_ADDRESS}/v2/apps"))
)
self.assertIsNotNone(scale_spy)
self.assertEqual(body_fixture, scale_spy[0].kwargs.get("json"))
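The expected values in the last two tests follow a simple proportional rule: a resource is rescaled by observed usage divided by the target fraction from its asgard.autoscale.* label, and only when usage sits more than five percentage points away from the target. A minimal sketch of that rule with a hypothetical helper name (not the worker's actual code):

def proportional_scale(current, usage_pct, target_fraction, deadband_pct=5.0):
    # Hypothetical helper mirroring the assertions above: skip scaling inside
    # the deadband, otherwise rescale so usage would land on the target.
    target_pct = target_fraction * 100.0
    if abs(usage_pct - target_pct) <= deadband_pct:
        return None  # within the deadband: leave the resource unchanged
    return current * usage_pct / target_pct

# proportional_scale(3.5, 24.9, 0.3) is about 2.905 (the cpu assertion) and
# proportional_scale(1.0, 85.1, 0.8) is about 1.06375 (the mem assertion),
# while 25.1% against a 30% target stays inside the deadband and returns None.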
[per-file quality-signal columns elided]
62741ad09bb3e37ce9a5eed2a49e42a74de368ef | 183 | py | Python
stars  | app/cmuxovik/admin.py | artem343/cmuxovik | 6f923f66ba47d0c513659c332fd8c89d21ea4abf | ["MIT"] | 2 | 2020-03-31T18:01:55.000Z | 2020-03-31T18:45:02.000Z
issues | app/cmuxovik/admin.py | artem343/cmuxovik | 6f923f66ba47d0c513659c332fd8c89d21ea4abf | ["MIT"] | 35 | 2020-03-31T17:47:09.000Z | 2022-03-12T00:22:54.000Z
forks  | app/cmuxovik/admin.py | artem343/cmuxovik | 6f923f66ba47d0c513659c332fd8c89d21ea4abf | ["MIT"] | null | null | null
from django.contrib import admin
from .models import Cmux, Tag, Author, Vote
admin.site.register(Cmux)
admin.site.register(Tag)
admin.site.register(Author)
admin.site.register(Vote)
[per-file quality-signal columns elided]
65905bfd4a48dc294e4630aaf88b314b42c4ecb4 | 98 | py | Python
stars  | misago/misago/users/permissions/__init__.py | vascoalramos/misago-deployment | 20226072138403108046c0afad9d99eb4163cedc | ["MIT"] | 2 | 2021-03-06T21:06:13.000Z | 2021-03-09T15:05:12.000Z
issues | misago/users/permissions/__init__.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | ["MIT"] | null | null | null
forks  | misago/users/permissions/__init__.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | ["MIT"] | null | null | null
from .decorators import *
from .delete import *
from .moderation import *
from .profiles import *
[per-file quality-signal columns elided]
02ec18dfc864d0a6c899ca80eaaffcc894cdf0dd | 92 | py | Python
stars  | test_ex1.py | assafine/IML.HUJI | b81b8beff05b5f120aa21a2f7fe90b4db95174f4 | ["MIT"] | null | null | null
issues | test_ex1.py | assafine/IML.HUJI | b81b8beff05b5f120aa21a2f7fe90b4db95174f4 | ["MIT"] | null | null | null
forks  | test_ex1.py | assafine/IML.HUJI | b81b8beff05b5f120aa21a2f7fe90b4db95174f4 | ["MIT"] | null | null | null
from IMLearn.learners import gaussian_estimators as ge
def test_check():
    assert 1 == 1
[per-file quality-signal columns elided]
f321e74e9db7a140ac368343c50ee96a580c20f5 | 158 | py | Python
stars  | tlssecondopinion/views.py | MiWCryptAnalytics/tlssecondopinion | f1eebf753cc898ba546bf1371f3ce1ea848d17d6 | ["BSD-2-Clause"] | null | null | null
issues | tlssecondopinion/views.py | MiWCryptAnalytics/tlssecondopinion | f1eebf753cc898ba546bf1371f3ce1ea848d17d6 | ["BSD-2-Clause"] | 4 | 2017-04-13T02:51:42.000Z | 2017-04-13T02:53:12.000Z
forks  | tlssecondopinion/views.py | MiWCryptAnalytics/tlssecondopinion | f1eebf753cc898ba546bf1371f3ce1ea848d17d6 | ["BSD-2-Clause"] | null | null | null
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic import View
def index(request):
return HttpResponseRedirect('scan')
[per-file quality-signal columns elided]
b84f811945d499d61f4cfa2c6c97c0f2b2f59b90 | 159 | py | Python
stars  | web/forum/admin.py | borzunov/django-forum | 37ee43327575e59a4f7e1fcaa9f3a1c0de08d2b3 | ["MIT"] | null | null | null
issues | web/forum/admin.py | borzunov/django-forum | 37ee43327575e59a4f7e1fcaa9f3a1c0de08d2b3 | ["MIT"] | null | null | null
forks  | web/forum/admin.py | borzunov/django-forum | 37ee43327575e59a4f7e1fcaa9f3a1c0de08d2b3 | ["MIT"] | null | null | null
from django.contrib import admin
from .models import Section, Topic, Post
admin.site.register(Section)
admin.site.register(Topic)
admin.site.register(Post)
[per-file quality-signal columns elided]
b86ac4a334151f71615d89701a5a23ee8fa3e500 | 46 | py | Python
stars  | olivenodes/__init__.py | GordStephen/olivenodes | 52fc2a1cc538da665c026f8c574db24add2d2b1b | ["MIT"] | null | null | null
issues | olivenodes/__init__.py | GordStephen/olivenodes | 52fc2a1cc538da665c026f8c574db24add2d2b1b | ["MIT"] | null | null | null
forks  | olivenodes/__init__.py | GordStephen/olivenodes | 52fc2a1cc538da665c026f8c574db24add2d2b1b | ["MIT"] | null | null | null
from .graph import Graph
from .nodes import *
[per-file quality-signal columns elided]
b86fbf8d8db90190972c4e419cdd67cbb59838e6 | 30 | py | Python
stars  | mlps/core/apeflow/interface/utils/__init__.py | seculayer/automl-mlps | 80569909ec1c25db1ceafbb85b27d069d1a66aa3 | ["Apache-2.0"] | null | null | null
issues | mlps/core/apeflow/interface/utils/__init__.py | seculayer/automl-mlps | 80569909ec1c25db1ceafbb85b27d069d1a66aa3 | ["Apache-2.0"] | 2 | 2022-03-31T07:39:59.000Z | 2022-03-31T07:40:18.000Z
forks  | mlps/core/apeflow/interface/utils/__init__.py | seculayer/AutoAPE-mlps | 80569909ec1c25db1ceafbb85b27d069d1a66aa3 | ["Apache-2.0"] | 1 | 2021-11-03T09:09:07.000Z | 2021-11-03T09:09:07.000Z
from . import gs, pytorch, tf
[per-file quality-signal columns elided]
b87439f8973b5e5d18727fc97759c6de6a5e3b66 | 35,743 | py | Python
stars  | test/dataset_test.py | cs6ting/pytorch_geometric_temporal | c854cd6fb0998c528c3c564703f05eba7953ea65 | ["MIT"] | 1 | 2020-06-27T01:48:33.000Z | 2020-06-27T01:48:33.000Z
issues | test/dataset_test.py | cs6ting/pytorch_geometric_temporal | c854cd6fb0998c528c3c564703f05eba7953ea65 | ["MIT"] | null | null | null
forks  | test/dataset_test.py | cs6ting/pytorch_geometric_temporal | c854cd6fb0998c528c3c564703f05eba7953ea65 | ["MIT"] | null | null | null
import numpy as np
import networkx as nx
from torch_geometric_temporal.signal import temporal_signal_split
from torch_geometric_temporal.signal import StaticGraphTemporalSignal
from torch_geometric_temporal.signal import DynamicGraphTemporalSignal
from torch_geometric_temporal.signal import DynamicGraphStaticSignal
from torch_geometric_temporal.signal import StaticHeteroGraphTemporalSignal
from torch_geometric_temporal.signal import DynamicHeteroGraphTemporalSignal
from torch_geometric_temporal.signal import DynamicHeteroGraphStaticSignal
from torch_geometric_temporal.dataset import METRLADatasetLoader, PemsBayDatasetLoader
from torch_geometric_temporal.dataset import (
ChickenpoxDatasetLoader,
PedalMeDatasetLoader,
WikiMathsDatasetLoader,
EnglandCovidDatasetLoader,
)
from torch_geometric_temporal.dataset import (
TwitterTennisDatasetLoader,
MontevideoBusDatasetLoader,
MTMDatasetLoader,
)
from torch_geometric_temporal.dataset import (
WindmillOutputLargeDatasetLoader,
WindmillOutputMediumDatasetLoader,
WindmillOutputSmallDatasetLoader,
)
def get_edge_array(n_count):
return np.array([edge for edge in nx.gnp_random_graph(n_count, 0.1).edges()]).T
def generate_signal(snapshot_count, n_count, feature_count, additional_features_keys=[]):
edge_indices = [get_edge_array(n_count) for _ in range(snapshot_count)]
edge_weights = [np.ones(edge_indices[t].shape[1]) for t in range(snapshot_count)]
features = [
np.random.uniform(0, 1, (n_count, feature_count)) for _ in range(snapshot_count)
]
if additional_features_keys:
additional_features = {
key: [np.random.uniform(0, 1, (n_count, feature_count)) for _ in range(snapshot_count)
] for key in additional_features_keys}
return edge_indices, edge_weights, features, additional_features
return edge_indices, edge_weights, features
def generate_heterogeneous_signal(snapshot_count, n_count, feature_count, *additional_features_keys):
edge_index_dicts = [{('author', 'writes', 'paper'): get_edge_array(n_count)} for _ in range(snapshot_count)]
edge_weight_dicts = [{('author', 'writes', 'paper'): np.ones(edge_index_dicts[t][('author', 'writes', 'paper')].shape[1])}
for t in range(snapshot_count)]
feature_dicts = [{'author': np.random.uniform(0, 1, (n_count, feature_count)),
'paper': np.random.uniform(0, 1, (n_count, feature_count))} for _ in range(snapshot_count)]
target_dicts = [{'author': np.random.uniform(0, 10, (n_count,)),
'paper': np.random.uniform(0, 10, (n_count,))} for _ in range(snapshot_count)]
if additional_features_keys:
additional_features = {
key: [{'author': np.random.uniform(0, 1, (n_count, feature_count)),
'paper': np.random.uniform(0, 1, (n_count, feature_count))} for _ in range(snapshot_count)]
for key in additional_features_keys}
return edge_index_dicts, edge_weight_dicts, feature_dicts, target_dicts, additional_features
return edge_index_dicts, edge_weight_dicts, feature_dicts, target_dicts
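# The two generators above return plain lists/dicts of numpy arrays shaped to
# feed the *Signal constructors directly, exactly as the first test below does:
#   edge_indices, edge_weights, features = generate_signal(250, 100, 32)
#   targets = [np.random.uniform(0, 10, (100,)) for _ in range(250)]
#   dataset = DynamicGraphTemporalSignal(edge_indices, edge_weights, features, targets)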
def test_dynamic_graph_temporal_signal_real():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_indices, edge_weights, features = generate_signal(250, 100, 32)
targets = [np.random.uniform(0, 10, (n_count,)) for _ in range(snapshot_count)]
dataset = DynamicGraphTemporalSignal(edge_indices, edge_weights, features, targets)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
targets = [
np.floor(np.random.uniform(0, 10, (n_count,))).astype(int)
for _ in range(snapshot_count)
]
dataset = DynamicGraphTemporalSignal(edge_indices, edge_weights, features, targets)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
def test_static_graph_temporal_signal():
dataset = StaticGraphTemporalSignal(None, None, [None, None], [None, None])
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x is None
assert snapshot.y is None
def test_dynamic_graph_temporal_signal():
dataset = DynamicGraphTemporalSignal(
[None, None], [None, None], [None, None], [None, None]
)
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x is None
assert snapshot.y is None
def test_static_graph_temporal_signal_typing():
dataset = StaticGraphTemporalSignal(None, None, [np.array([1])], [np.array([2])])
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x.shape == (1,)
assert snapshot.y.shape == (1,)
def test_dynamic_graph_static_signal_typing():
dataset = DynamicGraphStaticSignal([None], [None], None, [None])
for snapshot in dataset:
assert snapshot.edge_index is None
assert snapshot.edge_attr is None
assert snapshot.x is None
assert snapshot.y is None
def test_static_graph_temporal_signal_additional_attrs():
dataset = StaticGraphTemporalSignal(None, None, [None], [None],
optional1=[np.array([1])], optional2=[np.array([2])])
assert dataset.additional_feature_keys == ["optional1", "optional2"]
for snapshot in dataset:
assert snapshot.optional1.shape == (1,)
assert snapshot.optional2.shape == (1,)
def test_dynamic_graph_static_signal_additional_attrs():
dataset = DynamicGraphStaticSignal([None], [None], None, [None],
optional1=[np.array([1])], optional2=[np.array([2])])
assert dataset.additional_feature_keys == ["optional1", "optional2"]
for snapshot in dataset:
assert snapshot.optional1.shape == (1,)
assert snapshot.optional2.shape == (1,)
def test_dynamic_graph_temporal_signal_additional_attrs():
dataset = DynamicGraphTemporalSignal([None], [None], [None], [None],
optional1=[np.array([1])], optional2=[np.array([2])])
assert dataset.additional_feature_keys == ["optional1", "optional2"]
for snapshot in dataset:
assert snapshot.optional1.shape == (1,)
assert snapshot.optional2.shape == (1,)
def test_static_hetero_graph_temporal_signal():
dataset = StaticHeteroGraphTemporalSignal(None, None, [None], [None])
for snapshot in dataset:
assert len(snapshot.node_types) == 0
assert len(snapshot.node_stores) == 0
assert len(snapshot.edge_types) == 0
assert len(snapshot.edge_stores) == 0
def test_static_hetero_graph_temporal_signal_typing():
dataset = StaticHeteroGraphTemporalSignal(None, None, [{'author': np.array([1])}], [{'author': np.array([2])}])
for snapshot in dataset:
assert snapshot.node_types[0] == 'author'
assert snapshot.node_stores[0]['x'].shape == (1,)
assert snapshot.node_stores[0]['y'].shape == (1,)
assert len(snapshot.edge_types) == 0
def test_static_hetero_graph_temporal_signal_additional_attrs():
dataset = StaticHeteroGraphTemporalSignal(None, None, [None], [None],
optional1=[{'author': np.array([1])}],
optional2=[{'author': np.array([2])}],
optional3=[None])
assert dataset.additional_feature_keys == ["optional1", "optional2", "optional3"]
for snapshot in dataset:
assert snapshot.node_stores[0]['optional1'].shape == (1,)
assert snapshot.node_stores[0]['optional2'].shape == (1,)
assert "optional3" not in list(dict(snapshot.node_stores[0]).keys())
def test_static_hetero_graph_temporal_signal_edges():
dataset = StaticHeteroGraphTemporalSignal({("author", "writes", "paper"): np.array([[0, 1], [1, 0]])},
{("author", "writes", "paper"): np.array([[0.1], [0.1]])},
[{"author": np.array([[0], [0]]),
"paper": np.array([[0], [0], [0]])},
{"author": np.array([[0.1], [0.1]]),
"paper": np.array([[0.1], [0.1], [0.1]])}],
[None, None])
for snapshot in dataset:
assert snapshot.edge_stores[0]['edge_index'].shape == (2, 2)
assert snapshot.edge_stores[0]['edge_attr'].shape == (2, 1)
assert snapshot.edge_stores[0]['edge_index'].shape[0] == snapshot.edge_stores[0]['edge_attr'].shape[0]
def test_dynamic_hetero_graph_static_signal():
dataset = DynamicHeteroGraphStaticSignal([None], [None], None, [None])
for snapshot in dataset:
assert len(snapshot.node_types) == 0
assert len(snapshot.node_stores) == 0
assert len(snapshot.edge_types) == 0
assert len(snapshot.edge_stores) == 0
def test_dynamic_hetero_graph_static_signal_typing():
dataset = DynamicHeteroGraphStaticSignal([None], [None], {'author': np.array([1])}, [{'author': np.array([2])}])
for snapshot in dataset:
assert snapshot.node_types[0] == 'author'
assert snapshot.node_stores[0]['x'].shape == (1,)
assert snapshot.node_stores[0]['y'].shape == (1,)
assert len(snapshot.edge_types) == 0
def test_dynamic_hetero_graph_static_signal_additional_attrs():
dataset = DynamicHeteroGraphStaticSignal([None], [None], None, [None],
optional1=[{'author': np.array([1])}],
optional2=[{'author': np.array([2])}],
optional3=[None])
assert dataset.additional_feature_keys == ["optional1", "optional2", "optional3"]
for snapshot in dataset:
assert snapshot.node_stores[0]['optional1'].shape == (1,)
assert snapshot.node_stores[0]['optional2'].shape == (1,)
assert "optional3" not in list(dict(snapshot.node_stores[0]).keys())
def test_dynamic_hetero_graph_static_signal_edges():
dataset = DynamicHeteroGraphStaticSignal([{("author", "writes", "paper"): np.array([[0, 1], [1, 0]])}],
[{("author", "writes", "paper"): np.array([[0.1], [0.1]])}],
{"author": np.array([[0], [0]]),
"paper": np.array([[0], [0], [0]])},
[None])
for snapshot in dataset:
assert snapshot.edge_stores[0]['edge_index'].shape == (2, 2)
assert snapshot.edge_stores[0]['edge_attr'].shape == (2, 1)
assert snapshot.edge_stores[0]['edge_index'].shape[0] == snapshot.edge_stores[0]['edge_attr'].shape[0]
def test_dynamic_hetero_graph_temporal_signal():
dataset = DynamicHeteroGraphTemporalSignal(
[None, None], [None, None], [None, None], [None, None]
)
for snapshot in dataset:
assert len(snapshot.node_types) == 0
assert len(snapshot.node_stores) == 0
assert len(snapshot.edge_types) == 0
assert len(snapshot.edge_stores) == 0
def test_dynamic_hetero_graph_temporal_signal_typing():
dataset = DynamicHeteroGraphTemporalSignal([None], [None], [{'author': np.array([1])}], [{'author': np.array([2])}])
for snapshot in dataset:
assert snapshot.node_types[0] == 'author'
assert snapshot.node_stores[0]['x'].shape == (1,)
assert snapshot.node_stores[0]['y'].shape == (1,)
assert len(snapshot.edge_types) == 0
def test_dynamic_hetero_graph_temporal_signal_additional_attrs():
dataset = DynamicHeteroGraphTemporalSignal([None], [None], [None], [None],
optional1=[{'author': np.array([1])}],
optional2=[{'author': np.array([2])}],
optional3=[None])
assert dataset.additional_feature_keys == ["optional1", "optional2", "optional3"]
for snapshot in dataset:
assert snapshot.node_stores[0]['optional1'].shape == (1,)
assert snapshot.node_stores[0]['optional2'].shape == (1,)
assert "optional3" not in list(dict(snapshot.node_stores[0]).keys())
def test_dynamic_hetero_graph_temporal_signal_edges():
dataset = DynamicHeteroGraphTemporalSignal([{("author", "writes", "paper"): np.array([[0, 1], [1, 0]])}],
[{("author", "writes", "paper"): np.array([[0.1], [0.1]])}],
[{"author": np.array([[0], [0]]),
"paper": np.array([[0], [0], [0]])}],
[None])
for snapshot in dataset:
assert snapshot.edge_stores[0]['edge_index'].shape == (2, 2)
assert snapshot.edge_stores[0]['edge_attr'].shape == (2, 1)
assert snapshot.edge_stores[0]['edge_index'].shape[0] == snapshot.edge_stores[0]['edge_attr'].shape[0]
def test_chickenpox():
loader = ChickenpoxDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(3):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 102)
assert snapshot.edge_attr.shape == (102,)
assert snapshot.x.shape == (20, 4)
assert snapshot.y.shape == (20,)
def test_pedalme():
loader = PedalMeDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(3):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 225)
assert snapshot.edge_attr.shape == (225,)
assert snapshot.x.shape == (15, 4)
assert snapshot.y.shape == (15,)
def test_wiki():
loader = WikiMathsDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(1):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 27079)
assert snapshot.edge_attr.shape == (27079,)
assert snapshot.x.shape == (1068, 8)
assert snapshot.y.shape == (1068,)
def test_windmilllarge():
loader = WindmillOutputLargeDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 101761)
assert snapshot.edge_attr.shape == (101761,)
assert snapshot.x.shape == (319, 8)
assert snapshot.y.shape == (319,)
def test_windmillsmall():
loader = WindmillOutputSmallDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 121)
assert snapshot.edge_attr.shape == (121,)
assert snapshot.x.shape == (11, 8)
assert snapshot.y.shape == (11,)
def test_windmillmedium():
loader = WindmillOutputMediumDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 676)
assert snapshot.edge_attr.shape == (676,)
assert snapshot.x.shape == (26, 8)
assert snapshot.y.shape == (26,)
def test_covid():
loader = EnglandCovidDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_attr.shape[0] == snapshot.edge_index.shape[1]
assert snapshot.x.shape == (129, 8)
assert snapshot.y.shape == (129,)
def test_montevideobus():
loader = MontevideoBusDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(1):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 690)
assert snapshot.edge_attr.shape == (690,)
assert snapshot.x.shape == (675, 4)
assert snapshot.y.shape == (675,)
def test_metrla():
loader = METRLADatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 1722)
assert snapshot.edge_attr.shape == (1722,)
assert snapshot.x.shape == (207, 2, 12)
assert snapshot.y.shape == (207, 12)
def test_metrla_task_generator():
loader = METRLADatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset(num_timesteps_in=6, num_timesteps_out=5)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 1722)
assert snapshot.edge_attr.shape == (1722,)
assert snapshot.x.shape == (207, 2, 6)
assert snapshot.y.shape == (207, 5)
def test_pemsbay():
loader = PemsBayDatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset()
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 2694)
assert snapshot.edge_attr.shape == (2694,)
assert snapshot.x.shape == (325, 2, 12)
assert snapshot.y.shape == (325, 2, 12)
def test_pemsbay_task_generator():
loader = PemsBayDatasetLoader(raw_data_dir="/tmp/")
dataset = loader.get_dataset(num_timesteps_in=6, num_timesteps_out=5)
for epoch in range(2):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 2694)
assert snapshot.edge_attr.shape == (2694,)
assert snapshot.x.shape == (325, 2, 6)
assert snapshot.y.shape == (325, 2, 5)
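# --- Hedged aside (illustrative sketch, not part of the original suite) ---
# The num_timesteps_in/num_timesteps_out arguments exercised above drive a
# sliding-window task generator: each x covers `in` consecutive steps and
# each y the following `out` steps. A minimal numpy sketch of that windowing,
# with toy shapes mirroring the PEMS-BAY assertions; `series` is made up.
def _sliding_window_sketch():
    series = np.random.rand(325, 2, 24)  # (nodes, features, time)
    num_in, num_out = 6, 5
    windows = [
        (series[..., t:t + num_in],
         series[..., t + num_in:t + num_in + num_out])
        for t in range(series.shape[-1] - num_in - num_out + 1)
    ]
    x0, y0 = windows[0]
    assert x0.shape == (325, 2, 6)
    assert y0.shape == (325, 2, 5)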
def check_tennis_data(event_id, node_count, mode, edge_cnt):
loader = TwitterTennisDatasetLoader(event_id, N=node_count, feature_mode=mode)
dataset = loader.get_dataset()
for epoch in range(3):
i = 0
for snapshot in dataset:
if node_count == 1000:
assert snapshot.edge_index.shape == (2, edge_cnt[i])
assert snapshot.edge_attr.shape == (edge_cnt[i],)
else:
assert snapshot.edge_index.shape[1] <= edge_cnt[i]
assert snapshot.edge_attr.shape[0] <= edge_cnt[i]
if mode == "encoded":
assert snapshot.x.shape == (node_count, 16)
elif mode == "diagonal":
assert snapshot.x.shape == (node_count, node_count)
else:
assert snapshot.x.shape == (node_count, 2)
assert snapshot.y.shape == (node_count,)
i += 1
def test_twitter_tennis_rg17():
edges_in_snapshots = [
89,
61,
67,
283,
569,
515,
527,
262,
115,
85,
127,
315,
639,
841,
662,
341,
136,
108,
127,
257,
564,
664,
646,
424,
179,
82,
111,
250,
689,
897,
597,
352,
225,
109,
81,
305,
483,
816,
665,
310,
141,
145,
86,
285,
748,
703,
682,
341,
199,
102,
84,
327,
786,
776,
419,
208,
91,
78,
83,
263,
670,
880,
731,
361,
122,
68,
101,
269,
547,
673,
612,
221,
156,
99,
137,
262,
373,
368,
648,
288,
127,
62,
84,
319,
936,
889,
699,
291,
186,
83,
99,
191,
343,
502,
561,
283,
96,
92,
74,
178,
461,
720,
712,
279,
88,
41,
74,
137,
266,
664,
364,
167,
68,
59,
48,
178,
391,
815,
315,
189,
]
check_tennis_data("rg17", 1000, None, edges_in_snapshots)
check_tennis_data("rg17", 50, "diagonal", edges_in_snapshots)
def test_twitter_tennis_uo17():
edges_in_snapshots = [
88,
113,
273,
423,
718,
625,
640,
758,
434,
137,
289,
450,
625,
489,
336,
462,
284,
130,
188,
335,
523,
652,
584,
619,
452,
198,
206,
387,
464,
698,
601,
434,
279,
180,
162,
350,
613,
793,
474,
368,
231,
195,
152,
404,
591,
709,
642,
476,
413,
248,
160,
296,
521,
727,
725,
542,
200,
157,
268,
382,
638,
612,
640,
588,
250,
142,
142,
197,
341,
458,
395,
535,
256,
128,
180,
274,
732,
610,
632,
732,
481,
194,
206,
241,
287,
304,
376,
742,
196,
172,
117,
220,
311,
389,
610,
596,
165,
183,
183,
163,
406,
738,
464,
209,
103,
143,
115,
227,
203,
455,
638,
195,
]
check_tennis_data("uo17", 1000, None, edges_in_snapshots)
check_tennis_data("uo17", 200, "encoded", edges_in_snapshots)
def test_mtm():
loader = MTMDatasetLoader()
dataset = loader.get_dataset()
for epoch in range(3):
for snapshot in dataset:
assert snapshot.edge_index.shape == (2, 19)
assert snapshot.edge_attr.shape == (19,)
assert snapshot.x.shape == (3, 21, 16)
assert snapshot.y.shape == (16, 6)
def test_discrete_train_test_split_static():
loader = ChickenpoxDatasetLoader()
dataset = loader.get_dataset()
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape == (2, 102)
assert snapshot.edge_attr.shape == (102,)
assert snapshot.x.shape == (20, 4)
assert snapshot.y.shape == (20,)
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape == (2, 102)
assert snapshot.edge_attr.shape == (102,)
assert snapshot.x.shape == (20, 4)
assert snapshot.y.shape == (20,)
def test_discrete_train_test_split_dynamic():
snapshot_count = 250
n_count = 100
feature_count = 32
    edge_indices, edge_weights, features, additional_features = generate_signal(
        snapshot_count, n_count, feature_count, ["optional1", "optional2"]
    )
targets = [np.random.uniform(0, 10, (n_count,)) for _ in range(snapshot_count)]
dataset = DynamicGraphTemporalSignal(
edge_indices, edge_weights, features, targets, **additional_features
)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
assert getattr(snapshot, "optional1").shape == (100, 32)
assert getattr(snapshot, "optional2").shape == (100, 32)
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
assert getattr(snapshot, "optional1").shape == (100, 32)
assert getattr(snapshot, "optional2").shape == (100, 32)
def test_train_test_split_static_graph_temporal_signal():
snapshot_count = 250
n_count = 100
feature_count = 32
    edge_indices, edge_weights, features, additional_features = generate_signal(
        snapshot_count, n_count, feature_count, ["optional1", "optional2"]
    )
targets = [np.random.uniform(0, 10, (n_count,)) for _ in range(snapshot_count)]
dataset = StaticGraphTemporalSignal(
edge_indices[0], edge_weights[0], features, targets, **additional_features
)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
assert getattr(snapshot, "optional1").shape == (100, 32)
assert getattr(snapshot, "optional2").shape == (100, 32)
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
assert getattr(snapshot, "optional1").shape == (100, 32)
assert getattr(snapshot, "optional2").shape == (100, 32)
def test_discrete_train_test_split_dynamic_graph_static_signal():
snapshot_count = 250
n_count = 100
feature_count = 32
    edge_indices, edge_weights, features, additional_features = generate_signal(
        snapshot_count, n_count, feature_count, ["optional1", "optional2"]
    )
feature = features[0]
targets = [np.random.uniform(0, 10, (n_count,)) for _ in range(snapshot_count)]
dataset = DynamicGraphStaticSignal(
edge_indices, edge_weights, feature, targets, **additional_features
)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
assert getattr(snapshot, "optional1").shape == (100, 32)
assert getattr(snapshot, "optional2").shape == (100, 32)
for epoch in range(2):
for snapshot in train_dataset:
assert snapshot.edge_index.shape[0] == 2
assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
assert snapshot.x.shape == (100, 32)
assert snapshot.y.shape == (100,)
assert getattr(snapshot, "optional1").shape == (100, 32)
assert getattr(snapshot, "optional2").shape == (100, 32)
def test_train_test_split_dynamic_hetero_graph_temporal_signal():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_index_dicts, edge_weight_dicts, feature_dicts, target_dicts, additional_feature_dicts = generate_heterogeneous_signal(
snapshot_count, n_count, feature_count, "optional1", "optional2"
)
dataset = DynamicHeteroGraphTemporalSignal(
edge_index_dicts, edge_weight_dicts, feature_dicts, target_dicts, **additional_feature_dicts
)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert len(snapshot.node_types) == 2
assert snapshot.node_types[0] == 'author'
assert snapshot.node_types[1] == 'paper'
assert snapshot.node_stores[0]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[0]['y'].shape == (n_count,)
assert snapshot.node_stores[1]['y'].shape == (n_count,)
assert len(snapshot.edge_types) == 1
assert snapshot.edge_types[0] == ('author', 'writes', 'paper')
assert snapshot.edge_stores[0].edge_index.shape[0] == 2
assert snapshot.edge_stores[0].edge_index.shape[1] == snapshot.edge_stores[0].edge_attr.shape[0]
assert snapshot.node_stores[1]['optional1'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['optional2'].shape == (n_count, feature_count)
for epoch in range(2):
for snapshot in train_dataset:
assert len(snapshot.node_types) == 2
assert snapshot.node_types[0] == 'author'
assert snapshot.node_types[1] == 'paper'
assert snapshot.node_stores[0]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[0]['y'].shape == (n_count,)
assert snapshot.node_stores[1]['y'].shape == (n_count,)
assert len(snapshot.edge_types) == 1
assert snapshot.edge_types[0] == ('author', 'writes', 'paper')
assert snapshot.edge_stores[0].edge_index.shape[0] == 2
assert snapshot.edge_stores[0].edge_index.shape[1] == snapshot.edge_stores[0].edge_attr.shape[0]
assert snapshot.node_stores[1]['optional1'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['optional2'].shape == (n_count, feature_count)
def test_train_test_split_static_hetero_graph_temporal_signal():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_index_dicts, edge_weight_dicts, feature_dicts, target_dicts, additional_feature_dicts = generate_heterogeneous_signal(
snapshot_count, n_count, feature_count, "optional1", "optional2"
)
dataset = StaticHeteroGraphTemporalSignal(
edge_index_dicts[0], edge_weight_dicts[0], feature_dicts, target_dicts, **additional_feature_dicts
)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert len(snapshot.node_types) == 2
assert snapshot.node_types[0] == 'author'
assert snapshot.node_types[1] == 'paper'
assert snapshot.node_stores[0]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[0]['y'].shape == (n_count,)
assert snapshot.node_stores[1]['y'].shape == (n_count,)
assert len(snapshot.edge_types) == 1
assert snapshot.edge_types[0] == ('author', 'writes', 'paper')
assert snapshot.edge_stores[0].edge_index.shape[0] == 2
assert snapshot.edge_stores[0].edge_index.shape[1] == snapshot.edge_stores[0].edge_attr.shape[0]
assert snapshot.node_stores[1]['optional1'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['optional2'].shape == (n_count, feature_count)
for epoch in range(2):
for snapshot in train_dataset:
assert len(snapshot.node_types) == 2
assert snapshot.node_types[0] == 'author'
assert snapshot.node_types[1] == 'paper'
assert snapshot.node_stores[0]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[0]['y'].shape == (n_count,)
assert snapshot.node_stores[1]['y'].shape == (n_count,)
assert len(snapshot.edge_types) == 1
assert snapshot.edge_types[0] == ('author', 'writes', 'paper')
assert snapshot.edge_stores[0].edge_index.shape[0] == 2
assert snapshot.edge_stores[0].edge_index.shape[1] == snapshot.edge_stores[0].edge_attr.shape[0]
assert snapshot.node_stores[1]['optional1'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['optional2'].shape == (n_count, feature_count)
def test_train_test_split_dynamic_hetero_graph_static_signal():
snapshot_count = 250
n_count = 100
feature_count = 32
edge_index_dicts, edge_weight_dicts, feature_dicts, target_dicts, additional_feature_dicts = generate_heterogeneous_signal(
snapshot_count, n_count, feature_count, "optional1", "optional2"
)
dataset = DynamicHeteroGraphStaticSignal(
edge_index_dicts, edge_weight_dicts, feature_dicts[0], target_dicts, **additional_feature_dicts
)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for epoch in range(2):
for snapshot in test_dataset:
assert len(snapshot.node_types) == 2
assert snapshot.node_types[0] == 'author'
assert snapshot.node_types[1] == 'paper'
assert snapshot.node_stores[0]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[0]['y'].shape == (n_count,)
assert snapshot.node_stores[1]['y'].shape == (n_count,)
assert len(snapshot.edge_types) == 1
assert snapshot.edge_types[0] == ('author', 'writes', 'paper')
assert snapshot.edge_stores[0].edge_index.shape[0] == 2
assert snapshot.edge_stores[0].edge_index.shape[1] == snapshot.edge_stores[0].edge_attr.shape[0]
assert snapshot.node_stores[1]['optional1'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['optional2'].shape == (n_count, feature_count)
for epoch in range(2):
for snapshot in train_dataset:
assert len(snapshot.node_types) == 2
assert snapshot.node_types[0] == 'author'
assert snapshot.node_types[1] == 'paper'
assert snapshot.node_stores[0]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['x'].shape == (n_count, feature_count)
assert snapshot.node_stores[0]['y'].shape == (n_count,)
assert snapshot.node_stores[1]['y'].shape == (n_count,)
assert len(snapshot.edge_types) == 1
assert snapshot.edge_types[0] == ('author', 'writes', 'paper')
assert snapshot.edge_stores[0].edge_index.shape[0] == 2
assert snapshot.edge_stores[0].edge_index.shape[1] == snapshot.edge_stores[0].edge_attr.shape[0]
assert snapshot.node_stores[1]['optional1'].shape == (n_count, feature_count)
assert snapshot.node_stores[1]['optional2'].shape == (n_count, feature_count)
| 35.958753
| 127
| 0.595893
| 4,156
| 35,743
| 4.923003
| 0.087825
| 0.145064
| 0.07478
| 0.056305
| 0.850684
| 0.826882
| 0.75391
| 0.740371
| 0.718328
| 0.714809
| 0
| 0.062839
| 0.283188
| 35,743
| 993
| 128
| 35.994965
| 0.735725
| 0
| 0
| 0.549464
| 0
| 0
| 0.034636
| 0
| 0
| 0
| 0
| 0
| 0.309893
| 1
| 0.054827
| false
| 0
| 0.015495
| 0.001192
| 0.076281
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b8babb922debcc53d4eff231c9a6594f89d4ae65
| 111
|
py
|
Python
|
basic_examples/test_2_const.py
|
yurioliveira3/Python
|
02a3ea3bd44f93e51d5fc8b9cc017cf53e68266d
|
[
"MIT"
] | null | null | null |
basic_examples/test_2_const.py
|
yurioliveira3/Python
|
02a3ea3bd44f93e51d5fc8b9cc017cf53e68266d
|
[
"MIT"
] | null | null | null |
basic_examples/test_2_const.py
|
yurioliveira3/Python
|
02a3ea3bd44f93e51d5fc8b9cc017cf53e68266d
|
[
"MIT"
] | null | null | null |
class A:
    def __init__(self):
        print("1")

    def __init__(self):  # rebinds the name: only this second __init__ survives
        print("2")

a = A()  # prints "2" -- a Python class body keeps the last definition of a name
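# Hedged aside (not in the original file): Python has no constructor
# overloading; the class body runs top to bottom and the last def bound to
# __init__ wins. A common workaround is a single __init__ with a default
# argument, e.g. (hypothetical):
# class B:
#     def __init__(self, label="2"):
#         print(label)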
| 13.875
| 23
| 0.45045
| 14
| 111
| 3
| 0.571429
| 0.333333
| 0.52381
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.387387
| 111
| 8
| 24
| 13.875
| 0.588235
| 0
| 0
| 0.333333
| 0
| 0
| 0.017857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.5
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b21d202ec481227d6aa61310ac31e8d5203dbd9e
| 157
|
py
|
Python
|
src/infrastructure/clients/provider/xchange_api/exceptions.py
|
sdediego/forex-django-clean-architecture
|
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
|
[
"MIT"
] | 8
|
2021-11-09T16:43:38.000Z
|
2022-03-25T16:04:26.000Z
|
src/infrastructure/clients/provider/xchange_api/exceptions.py
|
sdediego/forex-django-clean-architecture
|
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
|
[
"MIT"
] | null | null | null |
src/infrastructure/clients/provider/xchange_api/exceptions.py
|
sdediego/forex-django-clean-architecture
|
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
|
[
"MIT"
] | 2
|
2021-11-16T21:17:31.000Z
|
2022-02-11T11:15:29.000Z
|
# coding: utf-8
from src.infrastructure.clients.provider.exceptions import ProviderDriverError
class XChangeAPIDriverError(ProviderDriverError):
pass
| 19.625
| 78
| 0.828025
| 15
| 157
| 8.666667
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 0.10828
| 157
| 7
| 79
| 22.428571
| 0.921429
| 0.082803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b2588fcc26158f6f11a505faab691ec793b0b8ed
| 14,232
|
py
|
Python
|
polaris/polaris/tests/sep12/test_customer.py
|
Zagan202/django-polaris
|
62802ec1585b57cd34e99e3993f2ddff662b9aaf
|
[
"Apache-2.0"
] | null | null | null |
polaris/polaris/tests/sep12/test_customer.py
|
Zagan202/django-polaris
|
62802ec1585b57cd34e99e3993f2ddff662b9aaf
|
[
"Apache-2.0"
] | null | null | null |
polaris/polaris/tests/sep12/test_customer.py
|
Zagan202/django-polaris
|
62802ec1585b57cd34e99e3993f2ddff662b9aaf
|
[
"Apache-2.0"
] | null | null | null |
from django.core.exceptions import ObjectDoesNotExist
from unittest.mock import Mock, patch
from urllib.parse import urlencode
from polaris.tests.helpers import (
mock_check_auth_success,
mock_check_auth_success_with_memo,
)
from stellar_sdk.keypair import Keypair
endpoint = "/kyc/customer"
mock_success_integration = Mock(
get=Mock(return_value={"status": "ACCEPTED"}), put=Mock(return_value="123"),
)
@patch("polaris.sep12.customer.rci", mock_success_integration)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_put_success(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 202
assert response.json() == {"id": "123"}
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
@patch("polaris.sep12.customer.rci", mock_success_integration)
def test_put_existing_id(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
response = client.put(
endpoint,
data={
"id": response.json()["id"],
"first_name": "Test2",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 202
assert response.json() == {"id": "123"}
mock_raise_bad_id_error = Mock(put=Mock(side_effect=ObjectDoesNotExist("bad id")))
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
@patch("polaris.sep12.customer.rci", mock_raise_bad_id_error)
def test_bad_existing_id(client):
response = client.put(
endpoint,
data={
"id": "notanid",
"first_name": "Test2",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 404
assert response.json()["error"] == "bad id"
@patch("polaris.sep12.customer.rci", mock_success_integration)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_put_memo(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"memo": "text memo",
"memo_type": "text",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 202
assert response.json() == {"id": "123"}
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_put_bad_account(client):
response = client.put(
endpoint,
data={
"account": "doesn't match mocked auth",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 403
assert "error" in response.json()
def test_put_no_auth(client):
response = client.put(
endpoint,
data={
"account": "doesn't match mocked auth",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 403
assert "error" in response.json()
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_put_bad_memo_type(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"memo": "text memo",
"memo_type": "not text",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 400
assert "error" in response.json()
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_put_bad_memo(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"memo": 123,
"memo_type": "text",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 400
assert "error" in response.json()
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_put_missing_memo(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"memo_type": "text",
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 400
assert "error" in response.json()
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_put_missing_memo_type(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"memo": 123,
"first_name": "Test",
"email_address": "test@example.com",
},
content_type="application/json",
)
assert response.status_code == 400
assert "error" in response.json()
mock_put = Mock(return_value="123")
@patch("polaris.sep12.customer.rci.put", mock_put)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_sep9_params(client):
response = client.put(
endpoint,
data={
"account": "test source address",
"first_name": "Test",
"email_address": "test@example.com",
"not-a-sep9-param": 1,
},
content_type="application/json",
)
mock_put.assert_called_with(
{
"id": None,
"memo": None,
"memo_type": None,
"account": "test source address",
"first_name": "Test",
"email_address": "test@example.com",
}
)
mock_put.reset_mock()
assert response.status_code == 202
assert response.json() == {"id": "123"}
mock_get_accepted = Mock(return_value={"status": "ACCEPTED", "id": "123"})
@patch("polaris.sep12.customer.rci.get", mock_get_accepted)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_get_accepted(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address"})
)
assert response.status_code == 200
assert response.json() == {"status": "ACCEPTED", "id": "123"}
@patch("polaris.sep12.customer.rci.get", Mock())
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_get_bad_auth(client):
response = client.get(endpoint + "?" + urlencode({"account": "not a match"}))
assert response.status_code == 403
assert "error" in response.json()
def test_get_no_auth(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address"})
)
assert response.status_code == 403
assert "error" in response.json()
@patch("polaris.sep12.customer.rci", mock_success_integration)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_no_id_or_account(client):
response = client.get(endpoint)
assert response.status_code == 200
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_get_bad_memo_type(client):
response = client.get(
endpoint
+ "?"
+ urlencode(
{
"account": "test source address",
"memo": "text memo",
"memo_type": "not text",
}
),
)
assert response.status_code == 400
assert "error" in response.json()
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_get_bad_memo(client):
response = client.get(
endpoint
+ "?"
+ urlencode(
{
"account": "test source address",
"memo": "not a hash",
"memo_type": "hash",
}
),
)
assert response.status_code == 400
assert "error" in response.json()
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_get_missing_memo(client):
response = client.get(
endpoint
+ "?"
+ urlencode({"account": "test source address", "memo_type": "text",}),
)
assert response.status_code == 400
assert "error" in response.json()
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_get_missing_memo_type(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address", "memo": "123"})
)
assert response.status_code == 400
assert "error" in response.json()
@patch(
"polaris.sep12.customer.rci.get",
Mock(
return_value={
"id": "123",
"status": "NEEDS_INFO",
"fields": {
"email_address": {
"description": "Email address of the user",
"type": "string",
}
},
}
),
)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_valid_needs_info_response(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address"})
)
assert response.status_code == 200
assert response.json() == {
"id": "123",
"status": "NEEDS_INFO",
"fields": {
"email_address": {
"description": "Email address of the user",
"type": "string",
}
},
}
@patch(
"polaris.sep12.customer.rci.get",
Mock(
return_value={
"status": "NEEDS_INFO",
"fields": {
"not a sep9 field": {
"description": "good description",
"type": "string",
}
},
}
),
)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_bad_field_needs_info(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address"})
)
assert response.status_code == 500
@patch(
"polaris.sep12.customer.rci.get",
Mock(
return_value={
"status": "NEEDS_INFO",
"fields": {"email_address": {"description": "a description",}},
}
),
)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_missing_type_field_needs_info(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address"})
)
assert response.status_code == 500
@patch(
"polaris.sep12.customer.rci.get",
Mock(
return_value={
"status": "NEEDS_INFO",
"fields": {
"email_address": {"description": "a description", "unknown_field": True}
},
}
),
)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_unknown_attr_needs_info(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address"})
)
assert response.status_code == 500
@patch(
"polaris.sep12.customer.rci.get",
Mock(
return_value={
"status": "NEEDS_INFO",
"fields": {"email_address": {"type": "string"}},
}
),
)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_no_description_needs_info(client):
response = client.get(
endpoint + "?" + urlencode({"account": "test source address"})
)
assert response.status_code == 500
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_delete_success(client):
response = client.delete("/".join([endpoint, "test source address"]))
assert response.status_code == 200
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_bad_auth_delete(client):
response = client.delete("/".join([endpoint, Keypair.random().public_key]))
assert response.status_code == 404
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_bad_memo_delete(client):
response = client.delete(
"/".join([endpoint, "test source address"]),
data={"memo": "not a valid hash memo", "memo_type": "hash"},
content_type="application/json",
)
assert response.status_code == 400
assert "memo" in response.json()["error"]
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
@patch("polaris.sep12.customer.rci.delete")
def test_delete_memo_customer(mock_delete, client):
response = client.delete(
"/".join([endpoint, "test source address"]),
data={"memo": "test memo string", "memo_type": "text"},
content_type="application/json",
)
assert response.status_code == 200
mock_delete.assert_called_with("test source address", "test memo string", "text")
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
@patch("polaris.sep12.customer.rci.delete")
def test_delete_memo_customer_with_memo(mock_delete, client):
response = client.delete(
"/".join([endpoint, "test source address"]),
data={"memo": "test memo string", "memo_type": "text"},
content_type="application/json",
)
assert response.status_code == 200
mock_delete.assert_called_with("test source address", "test memo string", "text")
mock_bad_delete = Mock(side_effect=ObjectDoesNotExist)
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
@patch("polaris.sep12.customer.rci.delete", mock_bad_delete)
def test_delete_memo_not_found(client):
response = client.delete(
"/".join([endpoint, "test source address"]),
data={"memo": "test memo string", "memo_type": "text"},
content_type="application/json",
)
assert response.status_code == 404
mock_bad_delete.assert_called_with(
"test source address", "test memo string", "text"
)
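# Hedged note (not in the original file): the `client` parameter in these
# tests is assumed to be the pytest-django test-client fixture, and
# mock_check_auth_success is assumed to authenticate requests as the
# account "test source address", which is why mismatched accounts get 403s.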
| 29.526971
| 88
| 0.612634
| 1,600
| 14,232
| 5.21625
| 0.069375
| 0.062545
| 0.046729
| 0.071891
| 0.89492
| 0.87503
| 0.870717
| 0.864965
| 0.849629
| 0.840163
| 0
| 0.020812
| 0.247119
| 14,232
| 481
| 89
| 29.588358
| 0.758096
| 0
| 0
| 0.64878
| 0
| 0
| 0.274452
| 0.091976
| 0
| 0
| 0
| 0
| 0.131707
| 1
| 0.073171
| false
| 0
| 0.012195
| 0
| 0.085366
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b2637cfb0bf86764434a09bf9a4440835ecbd6fe
| 24
|
py
|
Python
|
speaksee/evaluation/spice/__init__.py
|
aimagelab/speaksee
|
63700a4062e2ae00132a5c77007604fdaf4bd00b
|
[
"BSD-3-Clause"
] | 29
|
2019-02-28T05:29:53.000Z
|
2021-01-25T06:55:48.000Z
|
speaksee/evaluation/spice/__init__.py
|
aimagelab/speaksee
|
63700a4062e2ae00132a5c77007604fdaf4bd00b
|
[
"BSD-3-Clause"
] | 2
|
2019-10-26T02:29:59.000Z
|
2021-01-15T13:58:53.000Z
|
speaksee/evaluation/spice/__init__.py
|
aimagelab/speaksee
|
63700a4062e2ae00132a5c77007604fdaf4bd00b
|
[
"BSD-3-Clause"
] | 11
|
2019-03-12T08:43:09.000Z
|
2021-03-15T03:20:43.000Z
|
from .spice import Spice
| 24
| 24
| 0.833333
| 4
| 24
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2a29ee706126390fba24b52eeaceb6a50f97b1f
| 142
|
py
|
Python
|
info.py
|
NuCOS/nucosMQ
|
a66d25bb71eaaa176710ab8da820de90421760b3
|
[
"MIT"
] | 1
|
2017-10-10T17:56:57.000Z
|
2017-10-10T17:56:57.000Z
|
info.py
|
NuCOS/nucosObs
|
ff75a78efb7709cb57dfc91dab96d94c2c1d491b
|
[
"MIT"
] | 1
|
2021-01-15T12:38:15.000Z
|
2021-01-15T12:38:15.000Z
|
info.py
|
NuCOS/nucosCR
|
2fac4932603e00a615c73c73c58a156704ff4fa1
|
[
"MIT"
] | 1
|
2018-04-08T07:56:22.000Z
|
2018-04-08T07:56:22.000Z
|
from __future__ import print_function
import sys
import setuptools
print ("VERSION: ",sys.version_info)
print ("PATH: ", setuptools.__file__)
| 23.666667
| 37
| 0.795775
| 18
| 142
| 5.722222
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105634
| 142
| 5
| 38
| 28.4
| 0.811024
| 0
| 0
| 0
| 0
| 0
| 0.105634
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.6
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
a2592c3096510a48b7429c4a82edae9de7d5fe13
| 7,170
|
py
|
Python
|
quadpy/quadrilateral/__init__.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
quadpy/quadrilateral/__init__.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
quadpy/quadrilateral/__init__.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
from ..ncube import ncube_points as rectangle_points
from ..ncube import transform
from ._albrecht_collatz import (
albrecht_collatz_1,
albrecht_collatz_2,
albrecht_collatz_3,
albrecht_collatz_4,
)
from ._burnside import burnside
from ._cohen_gismalla import cohen_gismalla_1, cohen_gismalla_2
from ._cools_haegemans_1985 import (
cools_haegemans_1985_1,
cools_haegemans_1985_2,
cools_haegemans_1985_3,
)
from ._cools_haegemans_1988 import cools_haegemans_1988_1, cools_haegemans_1988_2
from ._dunavant import (
dunavant_00,
dunavant_01,
dunavant_02,
dunavant_03,
dunavant_04,
dunavant_05,
dunavant_06,
dunavant_07,
dunavant_08,
dunavant_09,
dunavant_10,
)
from ._franke import (
franke_1,
franke_2a,
franke_2b,
franke_3a,
franke_3b,
franke_3c,
franke_5,
franke_6,
franke_8,
)
from ._haegemans_piessens import haegemans_piessens
from ._hammer_stroud import hammer_stroud_1_2, hammer_stroud_2_2, hammer_stroud_3_2
from ._irwin import irwin_1, irwin_2
from ._maxwell import maxwell
from ._meister import meister
from ._miller import miller
from ._morrow_patterson import morrow_patterson_1, morrow_patterson_2
from ._phillips import phillips
from ._piessens_haegemans import piessens_haegemans_1, piessens_haegemans_2
from ._product import product
from ._rabinowitz_richter import (
rabinowitz_richter_1,
rabinowitz_richter_2,
rabinowitz_richter_3,
rabinowitz_richter_4,
rabinowitz_richter_5,
rabinowitz_richter_6,
)
from ._schmid import schmid_2, schmid_4, schmid_6
from ._sommariva import (
sommariva_01,
sommariva_02,
sommariva_03,
sommariva_04,
sommariva_05,
sommariva_06,
sommariva_07,
sommariva_08,
sommariva_09,
sommariva_10,
sommariva_11,
sommariva_12,
sommariva_13,
sommariva_14,
sommariva_15,
sommariva_16,
sommariva_17,
sommariva_18,
sommariva_19,
sommariva_20,
sommariva_21,
sommariva_22,
sommariva_23,
sommariva_24,
sommariva_25,
sommariva_26,
sommariva_27,
sommariva_28,
sommariva_29,
sommariva_30,
sommariva_31,
sommariva_32,
sommariva_33,
sommariva_34,
sommariva_35,
sommariva_36,
sommariva_37,
sommariva_38,
sommariva_39,
sommariva_40,
sommariva_41,
sommariva_42,
sommariva_43,
sommariva_44,
sommariva_45,
sommariva_46,
sommariva_47,
sommariva_48,
sommariva_49,
sommariva_50,
sommariva_51,
sommariva_52,
sommariva_53,
sommariva_54,
sommariva_55,
)
from ._stroud import (
stroud_c2_1_1,
stroud_c2_1_2,
stroud_c2_3_1,
stroud_c2_3_2,
stroud_c2_3_3,
stroud_c2_3_4,
stroud_c2_3_5,
stroud_c2_5_1,
stroud_c2_5_2,
stroud_c2_5_3,
stroud_c2_5_4,
stroud_c2_5_5,
stroud_c2_5_6,
stroud_c2_5_7,
stroud_c2_7_1,
stroud_c2_7_2,
stroud_c2_7_3,
stroud_c2_7_4,
stroud_c2_7_5,
stroud_c2_7_6,
stroud_c2_9_1,
stroud_c2_11_1,
stroud_c2_11_2,
stroud_c2_13_1,
stroud_c2_15_1,
stroud_c2_15_2,
)
from ._tyler import tyler_1, tyler_2, tyler_3
from ._waldron import waldron
from ._wissmann_becker import (
wissmann_becker_4_1,
wissmann_becker_4_2,
wissmann_becker_6_1,
wissmann_becker_6_2,
wissmann_becker_8_1,
wissmann_becker_8_2,
)
from ._witherden_vincent import (
witherden_vincent_01,
witherden_vincent_03,
witherden_vincent_05,
witherden_vincent_07,
witherden_vincent_09,
witherden_vincent_11,
witherden_vincent_13,
witherden_vincent_15,
witherden_vincent_17,
witherden_vincent_19,
witherden_vincent_21,
)
__all__ = [
"albrecht_collatz_1",
"albrecht_collatz_2",
"albrecht_collatz_3",
"albrecht_collatz_4",
"burnside",
"cohen_gismalla_1",
"cohen_gismalla_2",
"cools_haegemans_1985_1",
"cools_haegemans_1985_2",
"cools_haegemans_1985_3",
"cools_haegemans_1988_1",
"cools_haegemans_1988_2",
"dunavant_00",
"dunavant_01",
"dunavant_02",
"dunavant_03",
"dunavant_04",
"dunavant_05",
"dunavant_06",
"dunavant_07",
"dunavant_08",
"dunavant_09",
"dunavant_10",
"franke_1",
"franke_2a",
"franke_2b",
"franke_3a",
"franke_3b",
"franke_3c",
"franke_5",
"franke_6",
"franke_8",
"hammer_stroud_1_2",
"hammer_stroud_2_2",
"hammer_stroud_3_2",
"haegemans_piessens",
"irwin_1",
"irwin_2",
"maxwell",
"meister",
"miller",
"morrow_patterson_1",
"morrow_patterson_2",
"phillips",
"piessens_haegemans_1",
"piessens_haegemans_2",
"rabinowitz_richter_1",
"rabinowitz_richter_2",
"rabinowitz_richter_3",
"rabinowitz_richter_4",
"rabinowitz_richter_5",
"rabinowitz_richter_6",
"schmid_2",
"schmid_4",
"schmid_6",
"sommariva_01",
"sommariva_02",
"sommariva_03",
"sommariva_04",
"sommariva_05",
"sommariva_06",
"sommariva_07",
"sommariva_08",
"sommariva_09",
"sommariva_10",
"sommariva_11",
"sommariva_12",
"sommariva_13",
"sommariva_14",
"sommariva_15",
"sommariva_16",
"sommariva_17",
"sommariva_18",
"sommariva_19",
"sommariva_20",
"sommariva_21",
"sommariva_22",
"sommariva_23",
"sommariva_24",
"sommariva_25",
"sommariva_26",
"sommariva_27",
"sommariva_28",
"sommariva_29",
"sommariva_30",
"sommariva_31",
"sommariva_32",
"sommariva_33",
"sommariva_34",
"sommariva_35",
"sommariva_36",
"sommariva_37",
"sommariva_38",
"sommariva_39",
"sommariva_40",
"sommariva_41",
"sommariva_42",
"sommariva_43",
"sommariva_44",
"sommariva_45",
"sommariva_46",
"sommariva_47",
"sommariva_48",
"sommariva_49",
"sommariva_50",
"sommariva_51",
"sommariva_52",
"sommariva_53",
"sommariva_54",
"sommariva_55",
"stroud_c2_1_1",
"stroud_c2_1_2",
"stroud_c2_3_1",
"stroud_c2_3_2",
"stroud_c2_3_3",
"stroud_c2_3_4",
"stroud_c2_3_5",
"stroud_c2_5_1",
"stroud_c2_5_2",
"stroud_c2_5_3",
"stroud_c2_5_4",
"stroud_c2_5_5",
"stroud_c2_5_6",
"stroud_c2_5_7",
"stroud_c2_7_1",
"stroud_c2_7_2",
"stroud_c2_7_3",
"stroud_c2_7_4",
"stroud_c2_7_5",
"stroud_c2_7_6",
"stroud_c2_9_1",
"stroud_c2_11_1",
"stroud_c2_11_2",
"stroud_c2_13_1",
"stroud_c2_15_1",
"stroud_c2_15_2",
"tyler_1",
"tyler_2",
"tyler_3",
"waldron",
"wissmann_becker_4_1",
"wissmann_becker_4_2",
"wissmann_becker_6_1",
"wissmann_becker_6_2",
"wissmann_becker_8_1",
"wissmann_becker_8_2",
"witherden_vincent_01",
"witherden_vincent_03",
"witherden_vincent_05",
"witherden_vincent_07",
"witherden_vincent_09",
"witherden_vincent_11",
"witherden_vincent_13",
"witherden_vincent_15",
"witherden_vincent_17",
"witherden_vincent_19",
"witherden_vincent_21",
"product",
#
"transform",
"rectangle_points",
]
| 21.596386
| 83
| 0.683682
| 936
| 7,170
| 4.637821
| 0.11859
| 0.09583
| 0.033172
| 0.009214
| 0.822852
| 0.822852
| 0.760654
| 0.760654
| 0.743147
| 0.743147
| 0
| 0.113722
| 0.222455
| 7,170
| 331
| 84
| 21.661631
| 0.664933
| 0
| 0
| 0
| 0
| 0
| 0.299623
| 0.015344
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.082067
| 0
| 0.082067
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2c07088af39cefb7cf0fa1759368002e597cf65
| 797
|
py
|
Python
|
trash/server/queue/main.py
|
josmejia2401/face_recognition_python
|
6155bfcb6e0dcb5cc3441ff3192ecf83fd1dcf1b
|
[
"MIT"
] | null | null | null |
trash/server/queue/main.py
|
josmejia2401/face_recognition_python
|
6155bfcb6e0dcb5cc3441ff3192ecf83fd1dcf1b
|
[
"MIT"
] | null | null | null |
trash/server/queue/main.py
|
josmejia2401/face_recognition_python
|
6155bfcb6e0dcb5cc3441ff3192ecf83fd1dcf1b
|
[
"MIT"
] | null | null | null |
# --- server: exposes a shared Queue over the network (run as its own process) ---
from multiprocessing.managers import BaseManager
import queue

shared_queue = queue.Queue()

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue', callable=lambda: shared_queue)
m = QueueManager(address=('', 50000), authkey=b'abracadabra')
s = m.get_server()
s.serve_forever()

# --- put client (separate process): connects and enqueues an item ---
from multiprocessing.managers import BaseManager

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue')
m = QueueManager(address=('foo.bar.org', 50000), authkey=b'abracadabra')
m.connect()
queue = m.get_queue()
queue.put('hello')

# --- get client (separate process): connects and dequeues an item ---
from multiprocessing.managers import BaseManager

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue')
m = QueueManager(address=('foo.bar.org', 50000), authkey=b'abracadabra')
m.connect()
queue = m.get_queue()
queue.get()
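# --- Hedged aside (illustrative only, not in the original file): a consumer
# loop one might run in the "get" client above; `queue` is the Queue proxy
# returned by m.get_queue(), and the 5-second timeout is arbitrary.
# import queue as queue_mod
# while True:
#     try:
#         item = queue.get(timeout=5)   # blocks up to 5s for a producer
#     except queue_mod.Empty:
#         break
#     print("received:", item)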
| 25.709677
| 72
| 0.764115
| 100
| 797
| 6.02
| 0.29
| 0.083056
| 0.134552
| 0.164452
| 0.784053
| 0.710963
| 0.710963
| 0.710963
| 0.611296
| 0.611296
| 0
| 0.020921
| 0.100376
| 797
| 31
| 73
| 25.709677
| 0.818689
| 0.008783
| 0
| 0.68
| 0
| 0
| 0.110266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.12
| 0.16
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a2d2b6c753772ad021e6eccc8b19f61d6d25d1ed
| 40
|
py
|
Python
|
brainframe_qt/ui/main_window/activities/identity_configuration/encoding_list/__init__.py
|
aotuai/brainframe-qt
|
082cfd0694e569122ff7c63e56dd0ec4b62d5bac
|
[
"BSD-3-Clause"
] | 17
|
2021-02-11T18:19:22.000Z
|
2022-02-08T06:12:50.000Z
|
brainframe_qt/ui/main_window/activities/identity_configuration/encoding_list/__init__.py
|
aotuai/brainframe-qt
|
082cfd0694e569122ff7c63e56dd0ec4b62d5bac
|
[
"BSD-3-Clause"
] | 80
|
2021-02-11T08:27:31.000Z
|
2021-10-13T21:33:22.000Z
|
brainframe_qt/ui/main_window/activities/identity_configuration/encoding_list/__init__.py
|
aotuai/brainframe-qt
|
082cfd0694e569122ff7c63e56dd0ec4b62d5bac
|
[
"BSD-3-Clause"
] | 5
|
2021-02-12T09:51:34.000Z
|
2022-02-08T09:25:15.000Z
|
from .encoding_list import EncodingList
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a75c625f0f2c3a2e3fe73fcf1c0dea8612223cb4
| 141
|
py
|
Python
|
src/simprocedure/UnConstraintRetValue.py
|
alikmli/HeapOverFlow-Detection
|
609082881af9788c4ef351754aecbb1e31eff475
|
[
"MIT"
] | null | null | null |
src/simprocedure/UnConstraintRetValue.py
|
alikmli/HeapOverFlow-Detection
|
609082881af9788c4ef351754aecbb1e31eff475
|
[
"MIT"
] | null | null | null |
src/simprocedure/UnConstraintRetValue.py
|
alikmli/HeapOverFlow-Detection
|
609082881af9788c4ef351754aecbb1e31eff475
|
[
"MIT"
] | null | null | null |
import angr
import claripy  # kept for the commented-out symbolic return value below

class ExeFunc(angr.SimProcedure):
    def run(self, *args):
        pass
        # return claripy.BVS('UNConstrainRetValue', 8)
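# Hedged aside (not in the original file): a SimProcedure like ExeFunc is
# typically installed with Project.hook_symbol; the binary path and symbol
# name below are hypothetical.
# proj = angr.Project("/path/to/binary", auto_load_libs=False)
# proj.hook_symbol("some_external_func", ExeFunc())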
| 20.142857
| 52
| 0.673759
| 16
| 141
| 5.9375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009009
| 0.212766
| 141
| 6
| 53
| 23.5
| 0.846847
| 0.304965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
a7bd6015290d947a1d8319b1cfe598a6b20a95cd
| 76
|
py
|
Python
|
src/ipynta/packaging/__init__.py
|
allanchua101/ipynta
|
861c36b1c2d675611fcd5ed478d658f8180d03af
|
[
"MIT"
] | null | null | null |
src/ipynta/packaging/__init__.py
|
allanchua101/ipynta
|
861c36b1c2d675611fcd5ed478d658f8180d03af
|
[
"MIT"
] | null | null | null |
src/ipynta/packaging/__init__.py
|
allanchua101/ipynta
|
861c36b1c2d675611fcd5ed478d658f8180d03af
|
[
"MIT"
] | null | null | null |
from .tar_packager import TarPackager
from .zip_packager import ZipPackager
| 25.333333
| 37
| 0.868421
| 10
| 76
| 6.4
| 0.7
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 76
| 2
| 38
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a7cf0b21d2e981e8094886ebb46e37b5c00b54fb
| 12,394
|
py
|
Python
|
tests/unit/states/test_win_network.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 5
|
2018-05-01T20:51:14.000Z
|
2021-11-09T05:43:00.000Z
|
tests/unit/states/test_win_network.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
tests/unit/states/test_win_network.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 11
|
2017-01-26T19:36:29.000Z
|
2021-12-11T07:54:16.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Rahul Handay <rahulha@saltstack.com>
'''
# Import Python Libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.states.win_network as win_network
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinNetworkTestCase(TestCase, LoaderModuleMockMixin):
'''
Validate the nftables state
'''
def setup_loader_modules(self):
return {win_network: {}}
def test_managed_missing_parameters(self):
'''
Test to ensure that the named interface is configured properly.
'''
ret = {'name': 'salt',
'changes': {},
'result': False,
'comment': 'dns_proto must be one of the following: static, dhcp\n'
'ip_proto must be one of the following: static, dhcp'}
self.assertDictEqual(win_network.managed('salt'), ret)
def test_managed_static_enabled_false(self):
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': 'Interface \'salt\' is up to date (already disabled)'}
mock_false = MagicMock(return_value=False)
with patch.dict(win_network.__salt__, {"ip.is_enabled": mock_false}):
self.assertDictEqual(
win_network.managed(
'salt', dns_proto='static', ip_proto='static', enabled=False),
ret)
def test_managed_test_true(self):
ret = {'name': 'salt',
'changes': {},
'result': False,
'comment': 'Failed to enable interface \'salt\' to make changes'}
mock_false = MagicMock(return_value=False)
with patch.dict(win_network.__salt__, {"ip.is_enabled": mock_false,
"ip.enable": mock_false}), \
patch.dict(win_network.__opts__, {"test": False}):
self.assertDictEqual(
win_network.managed(
'salt', dns_proto='static', ip_proto='static'),
ret)
def test_managed_validate_errors(self):
ret = {'name': 'salt',
'changes': {},
'result': False,
'comment': 'The following SLS configuration errors were '
'detected:\n'
'- First Error\n'
'- Second Error'}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=['First Error', 'Second Error'])
with patch.dict(win_network.__salt__, {"ip.is_enabled": mock_true}),\
patch.object(win_network, '_validate', mock_validate):
self.assertDictEqual(
win_network.managed(
'salt', dns_proto='static', ip_proto='static'),
ret)
def test_managed_get_current_config_failed(self):
ret = {'name': 'salt',
'changes': {},
'result': False,
'comment': 'Unable to get current configuration for interface '
'\'salt\''}
mock_true = MagicMock(return_value=True)
mock_false = MagicMock(return_value=False)
mock_validate = MagicMock(return_value=[])
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_false}), \
patch.object(win_network, '_validate', mock_validate):
self.assertDictEqual(
win_network.managed('salt', dns_proto='dhcp', ip_proto='dhcp'),
ret)
def test_managed_test_true_no_changes(self):
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': 'Interface \'salt\' is up to date'}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(return_value={
'DHCP enabled': 'yes',
'DNS servers configured through DHCP': '192.168.0.10'})
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_get_int}), \
patch.dict(win_network.__opts__, {"test": True}), \
patch.object(win_network, '_validate', mock_validate):
self.assertDictEqual(
win_network.managed('salt', dns_proto='dhcp', ip_proto='dhcp'),
ret)
def test_managed_test_true_changes(self):
ret = {'name': 'salt',
'changes': {},
'result': None,
'comment': 'The following changes will be made to interface '
'\'salt\':\n'
'- DNS protocol will be changed to: dhcp'}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(return_value={
'DHCP enabled': 'no',
'Statically Configured DNS Servers': '192.168.0.10'})
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_get_int}), \
patch.dict(win_network.__opts__, {"test": True}), \
patch.object(win_network, '_validate', mock_validate):
self.assertDictEqual(
win_network.managed('salt', dns_proto='dhcp', ip_proto='dhcp'),
ret)
def test_managed_failed(self):
ret = {'name': 'salt',
'changes': {},
'result': False,
'comment': 'Failed to set desired configuration settings for '
'interface \'salt\''}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(return_value={
'DHCP enabled': 'no',
'Statically Configured DNS Servers': '192.168.0.10'})
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_get_int,
'ip.set_dhcp_dns': mock_true,
'ip.set_dhcp_ip': mock_true}), \
patch.dict(win_network.__opts__, {"test": False}), \
patch.object(win_network, '_validate', mock_validate):
self.assertDictEqual(
win_network.managed('salt', dns_proto='dhcp', ip_proto='dhcp'),
ret)
def test_managed(self):
ret = {'name': 'salt',
'changes': {
'DHCP enabled': {
'new': 'yes',
'old': 'no'},
'DNS servers configured through DHCP': {
'new': '192.168.0.10',
'old': ''},
'Statically Configured DNS Servers': {
'new': '',
'old': '192.168.0.10'
}
},
'result': True,
'comment': 'Successfully updated configuration for interface '
'\'salt\''}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(side_effect=[
{'DHCP enabled': 'no', 'Statically Configured DNS Servers': '192.168.0.10'},
{'DHCP enabled': 'yes', 'DNS servers configured through DHCP': '192.168.0.10'},
])
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_get_int,
'ip.set_dhcp_dns': mock_true,
'ip.set_dhcp_ip': mock_true}), \
patch.dict(win_network.__opts__, {"test": False}), \
patch.object(win_network, '_validate', mock_validate):
self.assertDictEqual(
win_network.managed('salt', dns_proto='dhcp', ip_proto='dhcp'),
ret)
def test_managed_static_dns_clear(self):
expected = {'name': 'salt',
'changes': {
'Statically Configured DNS Servers': {
'new': 'None',
'old': '192.168.0.10'
}
},
'result': True,
'comment': 'Successfully updated configuration for '
'interface \'salt\''}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(side_effect=[
{'DHCP enabled': 'no', 'Statically Configured DNS Servers': '192.168.0.10'},
{'DHCP enabled': 'no', 'Statically Configured DNS Servers': 'None'},
])
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_get_int,
'ip.set_static_dns': mock_true}), \
patch.dict(win_network.__opts__, {"test": False}), \
patch.object(win_network, '_validate', mock_validate):
ret = win_network.managed(
'salt', dns_proto='static', dns_servers=[], ip_proto='dhcp')
self.assertDictEqual(ret, expected)
def test_managed_static_dns(self):
expected = {'name': 'salt',
'changes': {
'Statically Configured DNS Servers': {
'new': '192.168.0.10',
'old': 'None'
}
},
'result': True,
'comment': 'Successfully updated configuration for '
'interface \'salt\''}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(side_effect=[
{'DHCP enabled': 'no', 'Statically Configured DNS Servers': 'None'},
{'DHCP enabled': 'no', 'Statically Configured DNS Servers': '192.168.0.10'},
])
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_get_int,
'ip.set_static_dns': mock_true}), \
patch.dict(win_network.__opts__, {"test": False}), \
patch.object(win_network, '_validate', mock_validate):
ret = win_network.managed(
'salt', dns_proto='static', dns_servers=['192.168.0.10'], ip_proto='dhcp')
self.assertDictEqual(ret, expected)
def test_managed_static_dns_no_action(self):
expected = {'name': 'salt',
'changes': {},
'result': True,
'comment': 'Interface \'salt\' is up to date'}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(return_value={
'DHCP enabled': 'no',
'Statically Configured DNS Servers': '192.168.0.10'
})
with patch.dict(win_network.__salt__, {'ip.is_enabled': mock_true,
'ip.get_interface': mock_get_int,
'ip.set_static_dns': mock_true}), \
patch.dict(win_network.__opts__, {"test": False}), \
patch.object(win_network, '_validate', mock_validate):
# Don't pass dns_servers
ret = win_network.managed('salt', dns_proto='static', ip_proto='dhcp')
self.assertDictEqual(ret, expected)
# Pass dns_servers=None
ret = win_network.managed(
'salt', dns_proto='static', dns_servers=None, ip_proto='dhcp')
self.assertDictEqual(ret, expected)
| 45.903704
| 91
| 0.516944
| 1,213
| 12,394
| 4.988458
| 0.111294
| 0.072715
| 0.082631
| 0.05966
| 0.839531
| 0.806809
| 0.785159
| 0.755412
| 0.741696
| 0.713601
| 0
| 0.014937
| 0.362595
| 12,394
| 269
| 92
| 46.074349
| 0.751013
| 0.021785
| 0
| 0.689362
| 0
| 0
| 0.210278
| 0
| 0
| 0
| 0
| 0
| 0.055319
| 1
| 0.055319
| false
| 0
| 0.021277
| 0.004255
| 0.085106
| 0.004255
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac7037a0268860ccf3d9124c95e8246d6841d2d0
| 129
|
py
|
Python
|
my_script.py
|
antauren/test_pip
|
593d64bddc479ce7c29d0dbabc89db2cabc7975c
|
[
"MIT"
] | null | null | null |
my_script.py
|
antauren/test_pip
|
593d64bddc479ce7c29d0dbabc89db2cabc7975c
|
[
"MIT"
] | null | null | null |
my_script.py
|
antauren/test_pip
|
593d64bddc479ce7c29d0dbabc89db2cabc7975c
|
[
"MIT"
] | null | null | null |
import utils2.math as math
import utils2.strings.str_utils as str_utils
print(math.add(10, 20))
print(str_utils.reverse("ABC"))
| 21.5
| 44
| 0.782946
| 23
| 129
| 4.26087
| 0.565217
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 0.093023
| 129
| 5
| 45
| 25.8
| 0.786325
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
3bb5a9ab6a42ef01d97c72735ae4c897c39f3056
| 221
|
py
|
Python
|
exercises/level_0/list_min_max.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 6
|
2019-03-29T06:14:53.000Z
|
2021-10-15T23:42:36.000Z
|
exercises/level_0/list_min_max.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 4
|
2019-09-06T10:03:40.000Z
|
2022-03-11T23:30:55.000Z
|
exercises/level_0/list_min_max.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 12
|
2019-06-20T19:34:52.000Z
|
2021-10-15T23:42:39.000Z
|
list1, list2 = [123, 567, 343, 611], [456, 700, 200]
print("Max value element : ", max(list1))
print("Max value element : ", max(list2))
print("min value element : ", min(list1))
print("min value element : ", min(list2))
| 36.833333
| 52
| 0.647059
| 33
| 221
| 4.333333
| 0.424242
| 0.335664
| 0.181818
| 0.27972
| 0.643357
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144385
| 0.153846
| 221
| 5
| 53
| 44.2
| 0.620321
| 0
| 0
| 0
| 0
| 0
| 0.361991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.8
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
3bd4a5db1b22eab7389c8b3efdd005c7f0a915fb
| 187
|
py
|
Python
|
barbearia/agendamento/__init__.py
|
FabioMarquesArao/OPE_BARBEARIA
|
867e7d4b67d9d70b6056b2d817cd3d2561ca7131
|
[
"MIT"
] | null | null | null |
barbearia/agendamento/__init__.py
|
FabioMarquesArao/OPE_BARBEARIA
|
867e7d4b67d9d70b6056b2d817cd3d2561ca7131
|
[
"MIT"
] | null | null | null |
barbearia/agendamento/__init__.py
|
FabioMarquesArao/OPE_BARBEARIA
|
867e7d4b67d9d70b6056b2d817cd3d2561ca7131
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
agendamento_bp = Blueprint("agendamento", __name__, static_folder="agendamento_static", template_folder="templates")
from barbearia.agendamento import routes
| 37.4
| 116
| 0.839572
| 21
| 187
| 7.095238
| 0.619048
| 0.268456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080214
| 187
| 5
| 117
| 37.4
| 0.866279
| 0
| 0
| 0
| 0
| 0
| 0.202128
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
0233a17a70529d16664c718ccbbf7b91a46eb37a
| 31
|
py
|
Python
|
src/try.py
|
alelallele/learn_python_st
|
55b06efdf63135f3fff2c93508d25c5f4ae0db7e
|
[
"Apache-2.0"
] | null | null | null |
src/try.py
|
alelallele/learn_python_st
|
55b06efdf63135f3fff2c93508d25c5f4ae0db7e
|
[
"Apache-2.0"
] | null | null | null |
src/try.py
|
alelallele/learn_python_st
|
55b06efdf63135f3fff2c93508d25c5f4ae0db7e
|
[
"Apache-2.0"
] | null | null | null |
print("Assalamualaikum Dunia")
| 15.5
| 30
| 0.806452
| 3
| 31
| 8.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 31
| 1
| 31
| 31
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
023cdccb156d5fe6eae8793ecbcb561d8a091508
| 75
|
py
|
Python
|
amocrm_asterisk_ng/telephony/impl/redirect_to_responsible/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/telephony/impl/redirect_to_responsible/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/telephony/impl/redirect_to_responsible/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
from .RedirectToResponsibleComponent import RedirectToResponsibleComponent
| 37.5
| 74
| 0.933333
| 4
| 75
| 17.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053333
| 75
| 1
| 75
| 75
| 0.985915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a1d28b26acb95a59f3028a6c346dab08548f699
| 898
|
py
|
Python
|
src/python/pants/backend/codegen/protobuf/java/register.py
|
stuhood/pants
|
107b8335a03482516f64aefa98aadf9f5278b2ee
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/protobuf/java/register.py
|
stuhood/pants
|
107b8335a03482516f64aefa98aadf9f5278b2ee
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/protobuf/java/register.py
|
stuhood/pants
|
107b8335a03482516f64aefa98aadf9f5278b2ee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Generate Java targets from Protocol Buffers (Protobufs).
See https://developers.google.com/protocol-buffers/.
"""
from pants.backend.codegen.protobuf.java.java_protobuf_library import (
JavaProtobufLibrary as JavaProtobufLibraryV1,
)
from pants.backend.codegen.protobuf.java.protobuf_gen import ProtobufGen
from pants.backend.codegen.protobuf.java.targets import JavaProtobufLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
return BuildFileAliases(targets={"java_protobuf_library": JavaProtobufLibraryV1})
def register_goals():
task(name="protoc", action=ProtobufGen).install("gen")
def targets2():
return [JavaProtobufLibrary]
| 32.071429
| 85
| 0.806236
| 107
| 898
| 6.654206
| 0.504673
| 0.063202
| 0.067416
| 0.09691
| 0.147472
| 0.147472
| 0
| 0
| 0
| 0
| 0
| 0.011166
| 0.10245
| 898
| 27
| 86
| 33.259259
| 0.872208
| 0.265033
| 0
| 0
| 1
| 0
| 0.046012
| 0.032209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| true
| 0
| 0.384615
| 0.153846
| 0.769231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
5a24642f5b7f32770c30b4c5d77704c6677f3f49
| 242
|
py
|
Python
|
app/codemirror/__init__.py
|
sappachok/django-anaconda
|
1ffd33ded759f622b6db23a3550a898b62350403
|
[
"MIT"
] | 39
|
2015-03-22T21:57:28.000Z
|
2021-11-04T08:17:15.000Z
|
app/codemirror/__init__.py
|
sappachok/django-anaconda
|
1ffd33ded759f622b6db23a3550a898b62350403
|
[
"MIT"
] | 67
|
2019-09-27T17:04:52.000Z
|
2022-03-21T22:16:23.000Z
|
app/codemirror/__init__.py
|
sappachok/django-datasci
|
1ffd33ded759f622b6db23a3550a898b62350403
|
[
"MIT"
] | 17
|
2015-09-08T15:52:15.000Z
|
2020-02-28T03:20:02.000Z
|
# -*- coding: utf-8 -*-
u"""
Library for using `CodeMirror` in Django.
"""
from codemirror.fields import CodeMirrorField, CodeMirrorFormField
from codemirror.utils import CodeMirrorJavascript
from codemirror.widgets import CodeMirrorTextarea
| 30.25
| 66
| 0.801653
| 26
| 242
| 7.461538
| 0.730769
| 0.216495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00463
| 0.107438
| 242
| 7
| 67
| 34.571429
| 0.893519
| 0.264463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce9f2ba1d839fbcf833b655e68da46871ddbf896
| 3,389
|
py
|
Python
|
tests/functional/test_config.py
|
koneksys/aras-oslc
|
92adb87b884014df5b82a1c5402592aabc916bc0
|
[
"MIT"
] | 3
|
2021-03-19T22:25:51.000Z
|
2021-03-20T19:34:28.000Z
|
tests/functional/test_config.py
|
koneksys/aras-oslc
|
92adb87b884014df5b82a1c5402592aabc916bc0
|
[
"MIT"
] | null | null | null |
tests/functional/test_config.py
|
koneksys/aras-oslc
|
92adb87b884014df5b82a1c5402592aabc916bc0
|
[
"MIT"
] | null | null | null |
import logging
from oslc_api.auth import login
from oslc_api.auth.models import User
log = logging.getLogger(__name__)
def test_components(oslc_api, source_base_uri, access_token, item_values,
mocker, load_item_types_test, load_items_test):
@login.request_loader
def load_user_from_request(request):
return User(username='admin', access_token=access_token)
item_type = item_values[0]
config_id = item_values[1]
if 'localhost' in source_base_uri:
mocker.patch(
'oslc_api.aras.resources.load_item_types',
return_value=load_item_types_test
)
mocker.patch(
'oslc_api.aras.resources.load_items',
return_value=load_items_test
)
res = oslc_api.get_components(item_type)
assert res is not None
assert res.status_code == 200, 'The request was not successful'
assert config_id.encode('ascii') in res.data, 'The response does not contain the config id'
def test_component(oslc_api, source_base_uri, access_token, item_values,
mocker, load_item_types_test, load_items_test, load_validate_configs_test):
@login.request_loader
def load_user_from_request(request):
return User(username='admin', access_token=access_token)
item_type = item_values[0]
config_id = item_values[1]
if 'localhost' in source_base_uri:
mocker.patch(
'oslc_api.aras.resources.load_item_types',
return_value=load_item_types_test
)
mocker.patch(
'oslc_api.aras.resources.load_items',
return_value=load_items_test
)
mocker.patch(
'oslc_api.aras.resources.validate_config_id',
return_value=load_validate_configs_test
)
res = oslc_api.get_component(item_type, config_id)
assert res is not None
assert res.status_code == 200, 'The request was not successful'
assert config_id.encode('ascii') in res.data, 'The response does not contain the config id'
assert b'oslc_config:configurations' in res.data
def test_configurations(oslc_api, source_base_uri, access_token, item_values,
mocker, load_item_types_test, load_items_test, load_validate_configs_test,
load_resource_shape_test):
@login.request_loader
def load_user_from_request(request):
return User(username='admin', access_token=access_token)
item_type = item_values[0]
config_id = item_values[1]
if 'localhost' in source_base_uri:
mocker.patch(
'oslc_api.aras.resources.load_item_types',
return_value=load_item_types_test
)
mocker.patch(
'oslc_api.aras.resources.load_items',
return_value=load_items_test
)
mocker.patch(
'oslc_api.aras.resources.validate_config_id',
return_value=load_resource_shape_test
)
mocker.patch(
'oslc_api.aras.resources.load_streams',
return_value=load_validate_configs_test
)
res = oslc_api.get_configurations(item_type, config_id)
assert res is not None
assert res.status_code == 200, 'The request was not successful'
assert config_id.encode('ascii') in res.data, 'The response does not contain the config id'
assert b'rdfs:member' in res.data
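Each test above re-issues the same mocker.patch calls against oslc_api.aras.resources. A sketch of a small helper that would collapse these into one call per test (patch_loaders is hypothetical, not part of this module):

def patch_loaders(mocker, **loaders):
    # Patch each named loader in oslc_api.aras.resources to return a fixed value.
    for name, value in loaders.items():
        mocker.patch('oslc_api.aras.resources.' + name, return_value=value)

Usage inside a test would then be, e.g., patch_loaders(mocker, load_item_types=load_item_types_test, load_items=load_items_test).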
| 32.902913
| 98
| 0.678961
| 451
| 3,389
| 4.751663
| 0.157428
| 0.05553
| 0.054596
| 0.075595
| 0.870742
| 0.864676
| 0.864676
| 0.864676
| 0.846477
| 0.846477
| 0
| 0.005864
| 0.245205
| 3,389
| 102
| 99
| 33.22549
| 0.8319
| 0
| 0
| 0.658228
| 0
| 0
| 0.192387
| 0.107701
| 0
| 0
| 0
| 0
| 0.139241
| 1
| 0.075949
| false
| 0
| 0.037975
| 0.037975
| 0.151899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ceaacb2b94e8999fac50a224bbdd3dea062d7950
| 177
|
py
|
Python
|
app/main/error.py
|
IsaiahKe/Personal-Blog
|
5ec76513bb8710ba3c92c515fddf00f0b3dc8975
|
[
"MIT"
] | null | null | null |
app/main/error.py
|
IsaiahKe/Personal-Blog
|
5ec76513bb8710ba3c92c515fddf00f0b3dc8975
|
[
"MIT"
] | null | null | null |
app/main/error.py
|
IsaiahKe/Personal-Blog
|
5ec76513bb8710ba3c92c515fddf00f0b3dc8975
|
[
"MIT"
] | null | null | null |
from flask import render_template
from . import main
@main.errorhandler(404)
def notfound(error):
    '''
    Render the custom 404 page; Flask passes the error instance to the handler.
    '''
    return render_template('notfound.html'), 404
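Note that errorhandler registered on a blueprint only catches errors raised from that blueprint's own routes. For an application-wide 404 page, Flask blueprints also offer app_errorhandler; a sketch using the same main blueprint:

@main.app_errorhandler(404)
def notfound_global(error):
    # Handles 404s raised anywhere in the application, not just in `main`.
    return render_template('notfound.html'), 404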
| 17.7
| 47
| 0.694915
| 21
| 177
| 5.761905
| 0.666667
| 0.231405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041958
| 0.19209
| 177
| 9
| 48
| 19.666667
| 0.804196
| 0.079096
| 0
| 0
| 0
| 0
| 0.088435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ceb1c880c1797da773391c2d5e7e2c5fe9d4761c
| 9,567
|
py
|
Python
|
idl2py/wcs/xyad.py
|
RapidLzj/idl2py
|
193051cd8d01db0d125b8975713b885ad521a992
|
[
"MIT"
] | null | null | null |
idl2py/wcs/xyad.py
|
RapidLzj/idl2py
|
193051cd8d01db0d125b8975713b885ad521a992
|
[
"MIT"
] | null | null | null |
idl2py/wcs/xyad.py
|
RapidLzj/idl2py
|
193051cd8d01db0d125b8975713b885ad521a992
|
[
"MIT"
] | null | null | null |
"""
By Dr Jie Zheng -Q, NAOC
v1 2019-04-27
"""
import numpy as np
from ..util import *
def xyad():
pass
#pro xyad, hdr, x, y, a, d, PRINT = print, GALACTIC = galactic, ALT = alt, $
# CELESTIAL = celestial, ECLIPTIC = ecliptic, PRECISION = precision
#;+
#; NAME:
#; XYAD
#; PURPOSE:
#; Use a FITS header to convert pixel (X,Y) to world coordinates
#; EXPLANATION:
#; Use astrometry in a FITS image header to compute world
#; coordinates in decimal degrees from X and Y.
#;
#; If spherical coordinates (Calabretta & Greisen 2002, A&A, 395, 1077) are
#; not present, then XYAD will still perform the transformation specified
#; by the CD, CRVAL, and CRPIX keywords.
#; CALLING SEQUENCE:
#; XYAD, HDR ;Prompt for X and Y positions
#; XYAD, HDR, X, Y, A, D, [ /PRINT, /Galactic, /Celestial, /Ecliptic,
#; ALT =, PRECISION=]
#; INPUTS:
#; HDR - FITS Image header containing astrometry info
#;
#; OPTIONAL INPUTS:
#; X - row position in pixels, scalar or vector
#; Y - column position in pixels, scalar or vector
#;
#; X and Y should be in IDL convention, (first pixel is (0,0) where
#; the integral value corresponds to the center of the pixel.)
#;
#; OPTIONAL OUTPUT:
#; A - Output longitude in decimal DEGREES (for spherical coordinates),
#; same number of elements as X and Y. For celestial
#; coordinates, this is the Right ascension.
#; D - Output latitude in decimal DEGREES. For celestial coordinates,
#; this is the declination.
#; OPTIONAL KEYWORD INPUT:
#; ALT - single character 'A' through 'Z' or ' ' specifying an alternate
#; astrometry system present in the FITS header. The default is
#; to use the primary astrometry or ALT = ' '. If /ALT is set,
#; then this is equivalent to ALT = 'A'. See Section 3.3 of
#; Greisen & Calabretta (2002, A&A, 395, 1061) for information about
#; alternate astrometry keywords.
#; PRECISION - Integer scalar (0-4) specifying the number of digits
#; displayed after the decimal of declination. The RA is
#; automatically one digit more. See ADSTRING() for more info.
#; Default value is 1, and the keyword is ignored if results are not
#; displayed at the terminal
#; /PRINT - If this keyword is set and non-zero, then results are displayed
#;              at the terminal, in both decimal and sexagesimal notation.
#;
#; The default for XYAD is to return the coordinate system present in
#; in the FITS header. However, the following mutually exclusive
#; keywords can be used to convert to a particular coordinate system:
#;
#; /CELESTIAL - Output is Right Ascension and declination
#; /ECLIPTIC - Output is Ecliptic longitude and latitude
#; /GALACTIC - Output is Galactic longitude and latitude
#; Celestial & Ecliptic coords depend on the reference
#; equinox, set to either B1950 (=FK4) or J2000 (=FK5,ICRS)
#; according to the header or standard FITS WCS defaults.
#; Note that astrometry at the sub-arcsec level requires
#; fine distinctions that are not handled here.
#;
#; OPERATIONAL NOTES:
#; If less than 5 parameters are supplied, or if the /PRINT keyword is
#; set, then the computed astronomical coordinates are displayed at the
#; terminal.
#;
#; If this procedure is to be used repeatedly with the same header,
#; then it would be faster to use XY2AD.
#;
#; EXAMPLE:
#; A FITS header, hdr, contains astrometric information in celestial
#; coordinates. Find the RA and Dec corresponding to position X=23.3
#; Y = 100.2 on an image
#; IDL> xyad, hdr, 23.3, 100.2 ;Displays results at the terminal
#; To display the results in Galactic coordinates
#; IDL> xyad, hdr, 23.3, 100.2, /GALACTIC
#;
#; PROCEDURES CALLED
#; ADSTRING(), EULER, EXTAST, GET_EQUINOX(), GSSSXYAD, REPCHR(), XY2AD
#;
#; REVISION HISTORY:
#; W. Landsman STX Jan, 1988
#; Use astrometry structure W. Landsman Jan, 1994
#; Recognize GSSS header W. Landsman June, 1994
#; Changed ADSTRING output format W. Landsman September 1995
#; Use vector call to ADSTRING() W. Landsman February 2000
#; Added ALT input keyword W. Landsman June 2003
#; Add precision keyword W. Landsman February 2004
#; Fix display if 'RA','DEC' reversed in CTYPE W. Landsman Feb. 2004
#; Handle display of NaN values W. Landsman May 2004
#; Work for non-spherical coordinate transformations W. Landsman Oct 2004
#; Fix output display units if ALT keyword used W. Landsman March 2005
#; More informative error message if no astrometry present W.L Nov 2007
#; Fix display when no equinox in header W.L. Dec 2007
#; Fix header display for noncelestial coords W.L. Jan 2008
#; Check for non-standard projections, set FK4 flag. J. P. Leahy Jul 2013
#;-
# compile_opt idl2
# On_error,2
#
# npar = N_params()
# if ( npar EQ 0 ) then begin
# print,'Syntax - XYAD, hdr, [x, y, a, d, /PRINT, Alt=, Precision=, '
# print,' /Galactic, /Celestial, /Ecliptic ]'
# print,'HDR - FITS header (string array) containing astrometry'
# print,'X,Y - Input X and Y positions (scalar or vector)'
# print,'A,D - Output RA and Dec in decimal degrees'
# return
# endif
#
# extast, hdr, astr, noparams, ALT = alt ;Extract astrometry structure
#
# if ( noparams LT 0 ) then begin
# if alt EQ '' then $
# message,'ERROR - No astrometry info in supplied FITS header' $
# else message, $
# 'ERROR - No alt=' + alt + ' astrometry info in supplied FITS header'
# endif
#
# astr2 = TAG_EXIST(astr,'AXES')
#
# if ( npar lt 3 ) then read,'XYAD: Enter X and Y positions: ',x,y
#
# case strmid(astr.ctype[0],5,3) of
# 'GSS': gsssxyad, astr, x, y, a, d
# else: xy2ad, x, y, astr, a, d
# endcase
# titname = strmid(astr.ctype,0,4)
# if (titname[0] EQ 'DEC-') || (titname[0] EQ 'ELAT') or $
# (titname[0] EQ 'GLAT') then titname = rotate(titname,2)
#
# eqnx = Get_Equinox(hdr,code)
# IF astr2 THEN FK4 = STRMID(astr.RADECSYS,0,3) EQ 'FK4' ELSE $
# FK4 = eqnx EQ 1950
#
# if keyword_set(GALACTIC) then begin
# case titname[0] of
# 'RA--': euler, a,d, select=1, FK4=fk4
# 'ELON': euler, a,d, select=5, FK4=fk4
# 'GLON':
# else: MESSAGE, "doesn't know how to convert from "+titname
# endcase
# titname = ['GLON','GLAT']
# endif else if keyword_set(ECLIPTIC) then begin
# case titname[0] of
# 'RA--': euler, a, d, select=3, FK4=fk4
# 'ELON':
# 'GLON': euler, a,d, select=6, FK4=fk4
# else: MESSAGE, "doesn't know how to convert from "+titname
# endcase
# titname = ['ELON','ELAT']
# endif else if keyword_set(CELESTIAL) then begin
# case titname[0] of
# 'RA--':
# 'ELON': euler, a, d, select=4, FK4 = FK4
# 'GLON': euler, a,d, select=2, FK4 = FK4
# else: MESSAGE, "doesn't know how to convert from "+titname
# endcase
# titname = ['RA--','DEC-']
# endif
#
# if (npar lt 5) or keyword_set(PRINT) then begin
# g = where( finite(d) and finite(a), Ng)
# tit1= titname[0]
# t1 = strpos(tit1,'-')
# if t1 gt 0 then tit1 = strmid(tit1,0,t1)
# tit2= titname[1]
# t1 = strpos(tit2,'-')
# if t1 gt 0 then tit2 = strmid(tit2,0,t1)
# npts = N_elements(X)
# spherical = strmid(astr.ctype[0],4,1) EQ '-'
# fmt = '(2F8.2,2x,2F9.4,2x,A)'
# if spherical then begin
#
# tit = ' X Y ' + tit1 + ' ' + tit2
# sexig = strmid(titname[0],0,4) EQ 'RA--'
# if sexig then begin
#
# eqnx = code NE -1 ? '_' + string(eqnx,f='(I4)') : ' '
# tit += $
# ' ' + tit1 + eqnx + ' ' + tit2 + eqnx
# if N_elements(precision) EQ 0 then precision = 1
# str = replicate(' --- --- ', Npts)
# if Ng GT 0 then str[g] = adstring(a[g],d[g],precision)
# endif else str = replicate('', npts)
# print,tit
# for i=0l, npts-1 do $
# print,FORMAT=fmt, float(x[i]), float(y[i]), a[i], d[i], str[i]
#
# endif else begin
# unit1 = strtrim( sxpar( hdr, 'CUNIT1'+alt,count = N_unit1),2)
# if N_unit1 EQ 0 then unit1 = ''
# unit2 = strtrim( sxpar( hdr, 'CUNIT2'+alt,count = N_unit2),2)
# if N_unit2 EQ 0 then unit2 = ''
# print,' X Y ' + titname[0] + ' ' + titname[1]
# if (N_unit1 GT 0) || (N_unit2 GT 0) then $
# print,unit1 ,unit2,f='(t23,a,t33,a)'
# for i=0l, npts-1 do $
# print,FORMAT=fmt, float(x[i]), float(y[i]), a[i], d[i]
# endelse
# endif
#
# return
# end
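The Python body above is still an empty stub, with the original IDL source preserved as comments. For reference, the core pixel-to-world step that XYAD performs maps naturally onto astropy.wcs; a minimal sketch under the assumption that astropy is available (it is not imported by this module):

from astropy.io import fits
from astropy.wcs import WCS

def xyad_sketch(header, x, y):
    # Convert pixel (x, y) to world coordinates in decimal degrees.
    wcs = WCS(header)
    # origin=0 matches the IDL convention where the first pixel is (0, 0).
    a, d = wcs.all_pix2world(x, y, 0)
    return a, d

# Example: hdr = fits.getheader('image.fits'); xyad_sketch(hdr, 23.3, 100.2)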
| 43.486364
| 84
| 0.56005
| 1,247
| 9,567
| 4.281476
| 0.281476
| 0.004495
| 0.005619
| 0.014609
| 0.16239
| 0.121933
| 0.085971
| 0.06593
| 0.06593
| 0.06593
| 0
| 0.039435
| 0.326748
| 9,567
| 219
| 85
| 43.684932
| 0.789474
| 0.906554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0c93cb6dd6cf4703a13978a52dea32a70d350517
| 784
|
py
|
Python
|
py_client/aidm/__init__.py
|
sma-software/openviriato.algorithm-platform.py-client
|
73d4cf89aa6f4d02ab15b5504d92107848742325
|
[
"Apache-2.0"
] | 2
|
2021-06-21T06:50:29.000Z
|
2021-06-30T15:58:02.000Z
|
py_client/aidm/__init__.py
|
sma-software/openviriato.algorithm-platform.py-client
|
73d4cf89aa6f4d02ab15b5504d92107848742325
|
[
"Apache-2.0"
] | null | null | null |
py_client/aidm/__init__.py
|
sma-software/openviriato.algorithm-platform.py-client
|
73d4cf89aa6f4d02ab15b5504d92107848742325
|
[
"Apache-2.0"
] | null | null | null |
from py_client.aidm.aidm_algorithm_classes import *
from py_client.aidm.aidm_enum_classes import *
from py_client.aidm.aidm_floating_point import *
from py_client.aidm.aidm_link_classes import *
from py_client.aidm.aidm_routing_edge_classes import *
from py_client.aidm.aidm_routing_point_classes import *
from py_client.aidm.aidm_table_cell_classes import *
from py_client.aidm.aidm_table_classes import *
from py_client.aidm.aidm_time_window_classes import *
from py_client.aidm.aidm_track_closure_classes import *
from py_client.aidm.aidm_train_classification_classes import *
from py_client.aidm.aidm_train_path_node_classes import *
from py_client.aidm.aidm_update_classes import *
from py_client.aidm.aidm_termination_request import TerminationRequest, SignalType
| 52.266667
| 83
| 0.855867
| 122
| 784
| 5.090164
| 0.229508
| 0.135266
| 0.270531
| 0.360709
| 0.766506
| 0.7343
| 0.692432
| 0.373591
| 0
| 0
| 0
| 0
| 0.090561
| 784
| 14
| 84
| 56
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cc2ef44ee606f3da56e31f6712a9da0b3ce9b60
| 5,712
|
py
|
Python
|
tests/test_engine/test_queries/test_queryop_comparsion_nin.py
|
jqueguiner/montydb
|
55bb3099fe110dbcd1ee24a71479fb0861d993a4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_comparsion_nin.py
|
jqueguiner/montydb
|
55bb3099fe110dbcd1ee24a71479fb0861d993a4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_comparsion_nin.py
|
jqueguiner/montydb
|
55bb3099fe110dbcd1ee24a71479fb0861d993a4
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import re
from montydb.errors import OperationFailure
from montydb.types import bson_ as bson
from ...conftest import skip_if_no_bson
def count_documents(cursor, spec=None):
return cursor.collection.count_documents(spec or {})
def test_qop_nin_1(monty_find, mongo_find):
docs = [
{"a": 0},
{"a": 1}
]
spec = {"a": {"$nin": [0]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
def test_qop_nin_2(monty_find, mongo_find):
docs = [
{"a": [1, 0]},
{"a": [1, 2]},
{"a": 3},
]
spec = {"a": {"$nin": [0, 2]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
mongo_c.rewind()
assert next(mongo_c)["_id"] == 2
def test_qop_nin_3(monty_find, mongo_find):
docs = [
{"a": {"1": 5}},
{"a": [1, 2]},
{"a": 0},
]
spec = {"a.1": {"$nin": [5, 2]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
mongo_c.rewind()
assert next(mongo_c)["_id"] == 2
def test_qop_nin_4(monty_find, mongo_find):
docs = [
{"a": {"b": 5}},
{"a": {"b": [2]}},
{"a": {"c": [2, 5]}},
]
spec = {"a.b": {"$nin": [5, 2]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
mongo_c.rewind()
assert next(mongo_c)["_id"] == 2
def test_qop_nin_5(monty_find, mongo_find):
docs = [
{"a": {"b": [[0]]}},
{"a": {"b": [2]}},
{"a": {"b": 2}},
]
spec = {"a.b": {"$nin": [[2], [0]]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
mongo_c.rewind()
assert next(mongo_c)["_id"] == 2
def test_qop_nin_6(monty_find, mongo_find):
docs = [
{"a": [{"b": 1}, {"b": 2}]},
{"a": [{"b": 3}, {"b": 4}]},
{"x": 5},
]
spec = {"a.b": {"$nin": [2]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 2
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
for i in range(2):
assert next(mongo_c) == next(monty_c)
def test_qop_nin_7(monty_find, mongo_find):
docs = [
{"a": [{"b": 1}, {"b": 2}]}
]
spec = {"a.b": {"$nin": [True]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_nin_8(monty_find, mongo_find):
docs = [
{"a": [{"b": 1}]},
{"a": [{"x": 1}]},
]
spec = {"a.b": {"$nin": [None]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
mongo_c.rewind()
assert next(mongo_c)["_id"] == 0
@skip_if_no_bson
def test_qop_nin_9(monty_find, mongo_find):
docs = [
{"a": "banana"},
]
spec = {"a": {"$nin": [bson.Regex("^a")]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_nin_10(monty_find, mongo_find):
docs = [
{"a": [bson.Regex("*")]},
]
spec = {"a": {"$nin": [[bson.Regex("*")]]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 0
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_nin_11(monty_find, mongo_find):
docs = [
{"a": "banana"},
]
spec = {"a": {"$nin": [re.compile("^a")]}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert count_documents(mongo_c, spec) == 1
assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_nin_12(monty_find, mongo_find):
docs = [
{"a": "apple"},
]
spec = {"a": {"$nin": [bson.Regex("*")]}}
monty_c = monty_find(docs, spec)
# Regular expression is invalid
with pytest.raises(OperationFailure):
next(monty_c)
def test_qop_nin_13(monty_find, mongo_find):
docs = [
{"a": 5},
]
spec = {"a": {"$nin": 5}}
monty_c = monty_find(docs, spec)
# $nin needs an array
with pytest.raises(OperationFailure):
next(monty_c)
def test_qop_nin_14(monty_find, mongo_find):
docs = [
{"a": 5},
]
spec = {"a": {"$nin": [5, {"$exists": 1}]}}
monty_c = monty_find(docs, spec)
# cannot nest $ under $nin
with pytest.raises(OperationFailure):
next(monty_c)
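A small standalone illustration of the $nin semantics these tests exercise, using montydb's documented in-memory client (a sketch, not part of the test suite):

from montydb import MontyClient

col = MontyClient(':memory:').db.test
col.insert_many([{'a': 0}, {'a': 1}])
# $nin matches documents whose value is not in the given array,
# so only the {'a': 1} document is returned here.
print(list(col.find({'a': {'$nin': [0]}})))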
| 24.834783
| 75
| 0.58666
| 831
| 5,712
| 3.747292
| 0.090253
| 0.100193
| 0.104367
| 0.141297
| 0.874438
| 0.860951
| 0.817598
| 0.773282
| 0.757225
| 0.757225
| 0
| 0.018332
| 0.235994
| 5,712
| 229
| 76
| 24.943231
| 0.695234
| 0.012955
| 0
| 0.581818
| 0
| 0
| 0.030002
| 0
| 0
| 0
| 0
| 0
| 0.218182
| 1
| 0.090909
| false
| 0
| 0.030303
| 0.006061
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0cca5d410b4e9598e7d598f136cddba5ed364266
| 29
|
py
|
Python
|
pyBSE/pybse/__init__.py
|
GSavathrakis/dart_board
|
9430d97675d69e381b701499587a02fd71b02990
|
[
"MIT"
] | 8
|
2017-12-04T22:32:25.000Z
|
2021-10-01T11:45:09.000Z
|
pyBSE/pybse/__init__.py
|
GSavathrakis/dart_board
|
9430d97675d69e381b701499587a02fd71b02990
|
[
"MIT"
] | 2
|
2018-03-14T00:10:43.000Z
|
2021-05-02T18:51:11.000Z
|
pyBSE/pybse/__init__.py
|
GSavathrakis/dart_board
|
9430d97675d69e381b701499587a02fd71b02990
|
[
"MIT"
] | 2
|
2018-07-17T23:00:01.000Z
|
2021-08-25T15:46:38.000Z
|
from .bse_wrapper import *
| 7.25
| 26
| 0.724138
| 4
| 29
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 29
| 3
| 27
| 9.666667
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cf0d29356f6aef890c987c670ac1b99d8fdb784
| 139
|
py
|
Python
|
tests/clazz/packageA/packageB/module_b1.py
|
hiroki0525/autoload_module
|
f3e10dc02d0fd24b8caa872f8c71f8902dc44f83
|
[
"MIT"
] | 10
|
2020-08-28T13:08:06.000Z
|
2021-12-21T12:03:05.000Z
|
tests/clazz/packageA/packageB/module_b1.py
|
hiroki0525/autoload_module
|
f3e10dc02d0fd24b8caa872f8c71f8902dc44f83
|
[
"MIT"
] | null | null | null |
tests/clazz/packageA/packageB/module_b1.py
|
hiroki0525/autoload_module
|
f3e10dc02d0fd24b8caa872f8c71f8902dc44f83
|
[
"MIT"
] | null | null | null |
from autoload import load_config
from tests.clazz.testmodule import TestModule
@load_config()
class CustomModuleB1(TestModule):
pass
| 17.375
| 45
| 0.81295
| 17
| 139
| 6.529412
| 0.647059
| 0.18018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008264
| 0.129496
| 139
| 7
| 46
| 19.857143
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0b4cc238797c30d4f040ede4fb4bda698aa414be
| 28
|
py
|
Python
|
src/asttrs/__init__.py
|
ryanchao2012/asttrs
|
b5e30ae6094f6b9d0504ca6b9c9a887df05a91c1
|
[
"MIT"
] | null | null | null |
src/asttrs/__init__.py
|
ryanchao2012/asttrs
|
b5e30ae6094f6b9d0504ca6b9c9a887df05a91c1
|
[
"MIT"
] | null | null | null |
src/asttrs/__init__.py
|
ryanchao2012/asttrs
|
b5e30ae6094f6b9d0504ca6b9c9a887df05a91c1
|
[
"MIT"
] | null | null | null |
from ._ast import * # noqa
| 14
| 27
| 0.642857
| 4
| 28
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 28
| 1
| 28
| 28
| 0.809524
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b828080493ab6a7a8f3b77302742b1202604f7b
| 25,545
|
py
|
Python
|
jaseci_core/jaseci/jac/jac_parse/jacLexer.py
|
panikingginoo12/jaseci
|
6659ab3a3edde865e2ff9a8dc6f2c0f98588d05b
|
[
"MIT"
] | null | null | null |
jaseci_core/jaseci/jac/jac_parse/jacLexer.py
|
panikingginoo12/jaseci
|
6659ab3a3edde865e2ff9a8dc6f2c0f98588d05b
|
[
"MIT"
] | null | null | null |
jaseci_core/jaseci/jac/jac_parse/jacLexer.py
|
panikingginoo12/jaseci
|
6659ab3a3edde865e2ff9a8dc6f2c0f98588d05b
|
[
"MIT"
] | null | null | null |
# Generated from jac.g4 by ANTLR 4.9.2
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2Z")
buf.write("\u026b\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\3\2\3\2\3\2\3\2\3\2\3\2\3\2")
buf.write("\3\2\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\6\3")
buf.write("\6\3\6\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\t\3\t\3\t\3\t")
buf.write("\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3")
buf.write("\13\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3")
buf.write("\f\3\f\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3")
buf.write("\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\33\3\33\3\33\3\34")
buf.write("\3\34\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37\3\37\3\37")
buf.write("\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3\"\3\"\3#\3#\3#\3$\3")
buf.write("$\3$\3%\3%\3%\3&\3&\3&\3\'\3\'\3\'\3(\3(\3(\3(\3(\5(\u0172")
buf.write("\n(\3)\3)\3)\3)\5)\u0178\n)\3*\3*\3*\3+\3+\3+\3+\3+\3")
buf.write(",\3,\3,\3,\3,\3-\3-\3-\3-\3.\3.\3.\3/\3/\3/\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\61")
buf.write("\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63")
buf.write("\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66")
buf.write("\3\66\3\66\3\66\3\66\3\66\3\67\3\67\38\38\39\39\39\39")
buf.write("\59\u01cc\n9\3:\3:\3:\3;\3;\3<\3<\3=\3=\3=\3>\3>\3>\3")
buf.write("?\3?\3?\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3C\3")
buf.write("C\3C\3C\3C\3C\3C\3C\3D\3D\3E\3E\3E\3E\3F\3F\3G\3G\3H\3")
buf.write("H\3I\3I\3J\3J\3K\3K\3L\3L\3M\3M\3N\3N\3O\3O\3P\6P\u020f")
buf.write("\nP\rP\16P\u0210\5P\u0213\nP\3P\3P\6P\u0217\nP\rP\16P")
buf.write("\u0218\3Q\3Q\7Q\u021d\nQ\fQ\16Q\u0220\13Q\3Q\3Q\3Q\7Q")
buf.write("\u0225\nQ\fQ\16Q\u0228\13Q\3Q\5Q\u022b\nQ\3R\3R\3R\3R")
buf.write("\3R\3R\3R\3R\3R\5R\u0236\nR\3S\6S\u0239\nS\rS\16S\u023a")
buf.write("\3T\3T\7T\u023f\nT\fT\16T\u0242\13T\3U\3U\3U\3U\7U\u0248")
buf.write("\nU\fU\16U\u024b\13U\3U\3U\3U\3U\3U\3V\3V\3V\3V\7V\u0256")
buf.write("\nV\fV\16V\u0259\13V\3V\3V\3W\3W\7W\u025f\nW\fW\16W\u0262")
buf.write("\13W\3W\3W\3X\3X\3X\3X\3Y\3Y\3\u0249\2Z\3\3\5\4\7\5\t")
buf.write("\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20")
buf.write("\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65")
buf.write("\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60")
buf.write("_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@\177A\u0081")
buf.write("B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH\u008fI\u0091")
buf.write("J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009fQ\u00a1")
buf.write("R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1")
buf.write("Z\3\2\t\3\2\62;\5\2\f\f\17\17$$\5\2\f\f\17\17))\5\2C\\")
buf.write("aac|\6\2\62;C\\aac|\4\2\f\f\17\17\5\2\13\f\17\17\"\"\2")
buf.write("\u0279\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2")
buf.write("\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2")
buf.write("\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33")
buf.write("\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2")
buf.write("\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2")
buf.write("\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2")
buf.write("\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2")
buf.write("\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3")
buf.write("\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S")
buf.write("\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2")
buf.write("]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2")
buf.write("\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2")
buf.write("\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2")
buf.write("\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2")
buf.write("\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089")
buf.write("\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2")
buf.write("\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097")
buf.write("\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2")
buf.write("\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5")
buf.write("\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2")
buf.write("\2\2\u00ad\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\3\u00b3")
buf.write("\3\2\2\2\5\u00bb\3\2\2\2\7\u00bf\3\2\2\2\t\u00c2\3\2\2")
buf.write("\2\13\u00c6\3\2\2\2\r\u00c9\3\2\2\2\17\u00ce\3\2\2\2\21")
buf.write("\u00d1\3\2\2\2\23\u00d7\3\2\2\2\25\u00de\3\2\2\2\27\u00e6")
buf.write("\3\2\2\2\31\u00ef\3\2\2\2\33\u00f4\3\2\2\2\35\u00fb\3")
buf.write("\2\2\2\37\u0100\3\2\2\2!\u0106\3\2\2\2#\u010b\3\2\2\2")
buf.write("%\u0111\3\2\2\2\'\u0116\3\2\2\2)\u011d\3\2\2\2+\u0122")
buf.write("\3\2\2\2-\u012a\3\2\2\2/\u012f\3\2\2\2\61\u0137\3\2\2")
buf.write("\2\63\u0140\3\2\2\2\65\u0142\3\2\2\2\67\u0145\3\2\2\2")
buf.write("9\u0149\3\2\2\2;\u014b\3\2\2\2=\u014d\3\2\2\2?\u0152\3")
buf.write("\2\2\2A\u0159\3\2\2\2C\u015b\3\2\2\2E\u015d\3\2\2\2G\u0160")
buf.write("\3\2\2\2I\u0163\3\2\2\2K\u0166\3\2\2\2M\u0169\3\2\2\2")
buf.write("O\u0171\3\2\2\2Q\u0177\3\2\2\2S\u0179\3\2\2\2U\u017c\3")
buf.write("\2\2\2W\u0181\3\2\2\2Y\u0186\3\2\2\2[\u018a\3\2\2\2]\u018d")
buf.write("\3\2\2\2_\u0190\3\2\2\2a\u0196\3\2\2\2c\u019f\3\2\2\2")
buf.write("e\u01a5\3\2\2\2g\u01af\3\2\2\2i\u01b4\3\2\2\2k\u01bb\3")
buf.write("\2\2\2m\u01c3\3\2\2\2o\u01c5\3\2\2\2q\u01cb\3\2\2\2s\u01cd")
buf.write("\3\2\2\2u\u01d0\3\2\2\2w\u01d2\3\2\2\2y\u01d4\3\2\2\2")
buf.write("{\u01d7\3\2\2\2}\u01da\3\2\2\2\177\u01dd\3\2\2\2\u0081")
buf.write("\u01e0\3\2\2\2\u0083\u01e7\3\2\2\2\u0085\u01eb\3\2\2\2")
buf.write("\u0087\u01f3\3\2\2\2\u0089\u01f5\3\2\2\2\u008b\u01f9\3")
buf.write("\2\2\2\u008d\u01fb\3\2\2\2\u008f\u01fd\3\2\2\2\u0091\u01ff")
buf.write("\3\2\2\2\u0093\u0201\3\2\2\2\u0095\u0203\3\2\2\2\u0097")
buf.write("\u0205\3\2\2\2\u0099\u0207\3\2\2\2\u009b\u0209\3\2\2\2")
buf.write("\u009d\u020b\3\2\2\2\u009f\u0212\3\2\2\2\u00a1\u022a\3")
buf.write("\2\2\2\u00a3\u0235\3\2\2\2\u00a5\u0238\3\2\2\2\u00a7\u023c")
buf.write("\3\2\2\2\u00a9\u0243\3\2\2\2\u00ab\u0251\3\2\2\2\u00ad")
buf.write("\u025c\3\2\2\2\u00af\u0265\3\2\2\2\u00b1\u0269\3\2\2\2")
buf.write("\u00b3\u00b4\7x\2\2\u00b4\u00b5\7g\2\2\u00b5\u00b6\7t")
buf.write("\2\2\u00b6\u00b7\7u\2\2\u00b7\u00b8\7k\2\2\u00b8\u00b9")
buf.write("\7q\2\2\u00b9\u00ba\7p\2\2\u00ba\4\3\2\2\2\u00bb\u00bc")
buf.write("\7/\2\2\u00bc\u00bd\7/\2\2\u00bd\u00be\7@\2\2\u00be\6")
buf.write("\3\2\2\2\u00bf\u00c0\7/\2\2\u00c0\u00c1\7@\2\2\u00c1\b")
buf.write("\3\2\2\2\u00c2\u00c3\7>\2\2\u00c3\u00c4\7/\2\2\u00c4\u00c5")
buf.write("\7/\2\2\u00c5\n\3\2\2\2\u00c6\u00c7\7>\2\2\u00c7\u00c8")
buf.write("\7/\2\2\u00c8\f\3\2\2\2\u00c9\u00ca\7>\2\2\u00ca\u00cb")
buf.write("\7/\2\2\u00cb\u00cc\7/\2\2\u00cc\u00cd\7@\2\2\u00cd\16")
buf.write("\3\2\2\2\u00ce\u00cf\7/\2\2\u00cf\u00d0\7/\2\2\u00d0\20")
buf.write("\3\2\2\2\u00d1\u00d2\7i\2\2\u00d2\u00d3\7t\2\2\u00d3\u00d4")
buf.write("\7c\2\2\u00d4\u00d5\7r\2\2\u00d5\u00d6\7j\2\2\u00d6\22")
buf.write("\3\2\2\2\u00d7\u00d8\7u\2\2\u00d8\u00d9\7v\2\2\u00d9\u00da")
buf.write("\7t\2\2\u00da\u00db\7k\2\2\u00db\u00dc\7e\2\2\u00dc\u00dd")
buf.write("\7v\2\2\u00dd\24\3\2\2\2\u00de\u00df\7f\2\2\u00df\u00e0")
buf.write("\7k\2\2\u00e0\u00e1\7i\2\2\u00e1\u00e2\7t\2\2\u00e2\u00e3")
buf.write("\7c\2\2\u00e3\u00e4\7r\2\2\u00e4\u00e5\7j\2\2\u00e5\26")
buf.write("\3\2\2\2\u00e6\u00e7\7u\2\2\u00e7\u00e8\7w\2\2\u00e8\u00e9")
buf.write("\7d\2\2\u00e9\u00ea\7i\2\2\u00ea\u00eb\7t\2\2\u00eb\u00ec")
buf.write("\7c\2\2\u00ec\u00ed\7r\2\2\u00ed\u00ee\7j\2\2\u00ee\30")
buf.write("\3\2\2\2\u00ef\u00f0\7p\2\2\u00f0\u00f1\7q\2\2\u00f1\u00f2")
buf.write("\7f\2\2\u00f2\u00f3\7g\2\2\u00f3\32\3\2\2\2\u00f4\u00f5")
buf.write("\7k\2\2\u00f5\u00f6\7i\2\2\u00f6\u00f7\7p\2\2\u00f7\u00f8")
buf.write("\7q\2\2\u00f8\u00f9\7t\2\2\u00f9\u00fa\7g\2\2\u00fa\34")
buf.write("\3\2\2\2\u00fb\u00fc\7v\2\2\u00fc\u00fd\7c\2\2\u00fd\u00fe")
buf.write("\7m\2\2\u00fe\u00ff\7g\2\2\u00ff\36\3\2\2\2\u0100\u0101")
buf.write("\7u\2\2\u0101\u0102\7r\2\2\u0102\u0103\7c\2\2\u0103\u0104")
buf.write("\7y\2\2\u0104\u0105\7p\2\2\u0105 \3\2\2\2\u0106\u0107")
buf.write("\7y\2\2\u0107\u0108\7k\2\2\u0108\u0109\7v\2\2\u0109\u010a")
buf.write("\7j\2\2\u010a\"\3\2\2\2\u010b\u010c\7g\2\2\u010c\u010d")
buf.write("\7p\2\2\u010d\u010e\7v\2\2\u010e\u010f\7t\2\2\u010f\u0110")
buf.write("\7{\2\2\u0110$\3\2\2\2\u0111\u0112\7g\2\2\u0112\u0113")
buf.write("\7z\2\2\u0113\u0114\7k\2\2\u0114\u0115\7v\2\2\u0115&\3")
buf.write("\2\2\2\u0116\u0117\7n\2\2\u0117\u0118\7g\2\2\u0118\u0119")
buf.write("\7p\2\2\u0119\u011a\7i\2\2\u011a\u011b\7v\2\2\u011b\u011c")
buf.write("\7j\2\2\u011c(\3\2\2\2\u011d\u011e\7m\2\2\u011e\u011f")
buf.write("\7g\2\2\u011f\u0120\7{\2\2\u0120\u0121\7u\2\2\u0121*\3")
buf.write("\2\2\2\u0122\u0123\7e\2\2\u0123\u0124\7q\2\2\u0124\u0125")
buf.write("\7p\2\2\u0125\u0126\7v\2\2\u0126\u0127\7g\2\2\u0127\u0128")
buf.write("\7z\2\2\u0128\u0129\7v\2\2\u0129,\3\2\2\2\u012a\u012b")
buf.write("\7k\2\2\u012b\u012c\7p\2\2\u012c\u012d\7h\2\2\u012d\u012e")
buf.write("\7q\2\2\u012e.\3\2\2\2\u012f\u0130\7f\2\2\u0130\u0131")
buf.write("\7g\2\2\u0131\u0132\7v\2\2\u0132\u0133\7c\2\2\u0133\u0134")
buf.write("\7k\2\2\u0134\u0135\7n\2\2\u0135\u0136\7u\2\2\u0136\60")
buf.write("\3\2\2\2\u0137\u0138\7c\2\2\u0138\u0139\7e\2\2\u0139\u013a")
buf.write("\7v\2\2\u013a\u013b\7k\2\2\u013b\u013c\7x\2\2\u013c\u013d")
buf.write("\7k\2\2\u013d\u013e\7v\2\2\u013e\u013f\7{\2\2\u013f\62")
buf.write("\3\2\2\2\u0140\u0141\7<\2\2\u0141\64\3\2\2\2\u0142\u0143")
buf.write("\7<\2\2\u0143\u0144\7<\2\2\u0144\66\3\2\2\2\u0145\u0146")
buf.write("\7<\2\2\u0146\u0147\7<\2\2\u0147\u0148\7@\2\2\u01488\3")
buf.write("\2\2\2\u0149\u014a\7}\2\2\u014a:\3\2\2\2\u014b\u014c\7")
buf.write("\177\2\2\u014c<\3\2\2\2\u014d\u014e\7g\2\2\u014e\u014f")
buf.write("\7f\2\2\u014f\u0150\7i\2\2\u0150\u0151\7g\2\2\u0151>\3")
buf.write("\2\2\2\u0152\u0153\7y\2\2\u0153\u0154\7c\2\2\u0154\u0155")
buf.write("\7n\2\2\u0155\u0156\7m\2\2\u0156\u0157\7g\2\2\u0157\u0158")
buf.write("\7t\2\2\u0158@\3\2\2\2\u0159\u015a\7=\2\2\u015aB\3\2\2")
buf.write("\2\u015b\u015c\7?\2\2\u015cD\3\2\2\2\u015d\u015e\7-\2")
buf.write("\2\u015e\u015f\7?\2\2\u015fF\3\2\2\2\u0160\u0161\7/\2")
buf.write("\2\u0161\u0162\7?\2\2\u0162H\3\2\2\2\u0163\u0164\7,\2")
buf.write("\2\u0164\u0165\7?\2\2\u0165J\3\2\2\2\u0166\u0167\7\61")
buf.write("\2\2\u0167\u0168\7?\2\2\u0168L\3\2\2\2\u0169\u016a\7<")
buf.write("\2\2\u016a\u016b\7?\2\2\u016bN\3\2\2\2\u016c\u016d\7c")
buf.write("\2\2\u016d\u016e\7p\2\2\u016e\u0172\7f\2\2\u016f\u0170")
buf.write("\7(\2\2\u0170\u0172\7(\2\2\u0171\u016c\3\2\2\2\u0171\u016f")
buf.write("\3\2\2\2\u0172P\3\2\2\2\u0173\u0174\7q\2\2\u0174\u0178")
buf.write("\7t\2\2\u0175\u0176\7~\2\2\u0176\u0178\7~\2\2\u0177\u0173")
buf.write("\3\2\2\2\u0177\u0175\3\2\2\2\u0178R\3\2\2\2\u0179\u017a")
buf.write("\7k\2\2\u017a\u017b\7h\2\2\u017bT\3\2\2\2\u017c\u017d")
buf.write("\7g\2\2\u017d\u017e\7n\2\2\u017e\u017f\7k\2\2\u017f\u0180")
buf.write("\7h\2\2\u0180V\3\2\2\2\u0181\u0182\7g\2\2\u0182\u0183")
buf.write("\7n\2\2\u0183\u0184\7u\2\2\u0184\u0185\7g\2\2\u0185X\3")
buf.write("\2\2\2\u0186\u0187\7h\2\2\u0187\u0188\7q\2\2\u0188\u0189")
buf.write("\7t\2\2\u0189Z\3\2\2\2\u018a\u018b\7v\2\2\u018b\u018c")
buf.write("\7q\2\2\u018c\\\3\2\2\2\u018d\u018e\7d\2\2\u018e\u018f")
buf.write("\7{\2\2\u018f^\3\2\2\2\u0190\u0191\7y\2\2\u0191\u0192")
buf.write("\7j\2\2\u0192\u0193\7k\2\2\u0193\u0194\7n\2\2\u0194\u0195")
buf.write("\7g\2\2\u0195`\3\2\2\2\u0196\u0197\7e\2\2\u0197\u0198")
buf.write("\7q\2\2\u0198\u0199\7p\2\2\u0199\u019a\7v\2\2\u019a\u019b")
buf.write("\7k\2\2\u019b\u019c\7p\2\2\u019c\u019d\7w\2\2\u019d\u019e")
buf.write("\7g\2\2\u019eb\3\2\2\2\u019f\u01a0\7d\2\2\u01a0\u01a1")
buf.write("\7t\2\2\u01a1\u01a2\7g\2\2\u01a2\u01a3\7c\2\2\u01a3\u01a4")
buf.write("\7m\2\2\u01a4d\3\2\2\2\u01a5\u01a6\7f\2\2\u01a6\u01a7")
buf.write("\7k\2\2\u01a7\u01a8\7u\2\2\u01a8\u01a9\7g\2\2\u01a9\u01aa")
buf.write("\7p\2\2\u01aa\u01ab\7i\2\2\u01ab\u01ac\7c\2\2\u01ac\u01ad")
buf.write("\7i\2\2\u01ad\u01ae\7g\2\2\u01aef\3\2\2\2\u01af\u01b0")
buf.write("\7u\2\2\u01b0\u01b1\7m\2\2\u01b1\u01b2\7k\2\2\u01b2\u01b3")
buf.write("\7r\2\2\u01b3h\3\2\2\2\u01b4\u01b5\7t\2\2\u01b5\u01b6")
buf.write("\7g\2\2\u01b6\u01b7\7r\2\2\u01b7\u01b8\7q\2\2\u01b8\u01b9")
buf.write("\7t\2\2\u01b9\u01ba\7v\2\2\u01baj\3\2\2\2\u01bb\u01bc")
buf.write("\7f\2\2\u01bc\u01bd\7g\2\2\u01bd\u01be\7u\2\2\u01be\u01bf")
buf.write("\7v\2\2\u01bf\u01c0\7t\2\2\u01c0\u01c1\7q\2\2\u01c1\u01c2")
buf.write("\7{\2\2\u01c2l\3\2\2\2\u01c3\u01c4\7(\2\2\u01c4n\3\2\2")
buf.write("\2\u01c5\u01c6\7\60\2\2\u01c6p\3\2\2\2\u01c7\u01cc\7#")
buf.write("\2\2\u01c8\u01c9\7p\2\2\u01c9\u01ca\7q\2\2\u01ca\u01cc")
buf.write("\7v\2\2\u01cb\u01c7\3\2\2\2\u01cb\u01c8\3\2\2\2\u01cc")
buf.write("r\3\2\2\2\u01cd\u01ce\7?\2\2\u01ce\u01cf\7?\2\2\u01cf")
buf.write("t\3\2\2\2\u01d0\u01d1\7>\2\2\u01d1v\3\2\2\2\u01d2\u01d3")
buf.write("\7@\2\2\u01d3x\3\2\2\2\u01d4\u01d5\7>\2\2\u01d5\u01d6")
buf.write("\7?\2\2\u01d6z\3\2\2\2\u01d7\u01d8\7@\2\2\u01d8\u01d9")
buf.write("\7?\2\2\u01d9|\3\2\2\2\u01da\u01db\7#\2\2\u01db\u01dc")
buf.write("\7?\2\2\u01dc~\3\2\2\2\u01dd\u01de\7k\2\2\u01de\u01df")
buf.write("\7p\2\2\u01df\u0080\3\2\2\2\u01e0\u01e1\7c\2\2\u01e1\u01e2")
buf.write("\7p\2\2\u01e2\u01e3\7e\2\2\u01e3\u01e4\7j\2\2\u01e4\u01e5")
buf.write("\7q\2\2\u01e5\u01e6\7t\2\2\u01e6\u0082\3\2\2\2\u01e7\u01e8")
buf.write("\7j\2\2\u01e8\u01e9\7c\2\2\u01e9\u01ea\7u\2\2\u01ea\u0084")
buf.write("\3\2\2\2\u01eb\u01ec\7r\2\2\u01ec\u01ed\7t\2\2\u01ed\u01ee")
buf.write("\7k\2\2\u01ee\u01ef\7x\2\2\u01ef\u01f0\7c\2\2\u01f0\u01f1")
buf.write("\7v\2\2\u01f1\u01f2\7g\2\2\u01f2\u0086\3\2\2\2\u01f3\u01f4")
buf.write("\7.\2\2\u01f4\u0088\3\2\2\2\u01f5\u01f6\7e\2\2\u01f6\u01f7")
buf.write("\7c\2\2\u01f7\u01f8\7p\2\2\u01f8\u008a\3\2\2\2\u01f9\u01fa")
buf.write("\7-\2\2\u01fa\u008c\3\2\2\2\u01fb\u01fc\7/\2\2\u01fc\u008e")
buf.write("\3\2\2\2\u01fd\u01fe\7,\2\2\u01fe\u0090\3\2\2\2\u01ff")
buf.write("\u0200\7\61\2\2\u0200\u0092\3\2\2\2\u0201\u0202\7\'\2")
buf.write("\2\u0202\u0094\3\2\2\2\u0203\u0204\7`\2\2\u0204\u0096")
buf.write("\3\2\2\2\u0205\u0206\7*\2\2\u0206\u0098\3\2\2\2\u0207")
buf.write("\u0208\7+\2\2\u0208\u009a\3\2\2\2\u0209\u020a\7]\2\2\u020a")
buf.write("\u009c\3\2\2\2\u020b\u020c\7_\2\2\u020c\u009e\3\2\2\2")
buf.write("\u020d\u020f\t\2\2\2\u020e\u020d\3\2\2\2\u020f\u0210\3")
buf.write("\2\2\2\u0210\u020e\3\2\2\2\u0210\u0211\3\2\2\2\u0211\u0213")
buf.write("\3\2\2\2\u0212\u020e\3\2\2\2\u0212\u0213\3\2\2\2\u0213")
buf.write("\u0214\3\2\2\2\u0214\u0216\7\60\2\2\u0215\u0217\t\2\2")
buf.write("\2\u0216\u0215\3\2\2\2\u0217\u0218\3\2\2\2\u0218\u0216")
buf.write("\3\2\2\2\u0218\u0219\3\2\2\2\u0219\u00a0\3\2\2\2\u021a")
buf.write("\u021e\7$\2\2\u021b\u021d\n\3\2\2\u021c\u021b\3\2\2\2")
buf.write("\u021d\u0220\3\2\2\2\u021e\u021c\3\2\2\2\u021e\u021f\3")
buf.write("\2\2\2\u021f\u0221\3\2\2\2\u0220\u021e\3\2\2\2\u0221\u022b")
buf.write("\7$\2\2\u0222\u0226\7)\2\2\u0223\u0225\n\4\2\2\u0224\u0223")
buf.write("\3\2\2\2\u0225\u0228\3\2\2\2\u0226\u0224\3\2\2\2\u0226")
buf.write("\u0227\3\2\2\2\u0227\u0229\3\2\2\2\u0228\u0226\3\2\2\2")
buf.write("\u0229\u022b\7)\2\2\u022a\u021a\3\2\2\2\u022a\u0222\3")
buf.write("\2\2\2\u022b\u00a2\3\2\2\2\u022c\u022d\7v\2\2\u022d\u022e")
buf.write("\7t\2\2\u022e\u022f\7w\2\2\u022f\u0236\7g\2\2\u0230\u0231")
buf.write("\7h\2\2\u0231\u0232\7c\2\2\u0232\u0233\7n\2\2\u0233\u0234")
buf.write("\7u\2\2\u0234\u0236\7g\2\2\u0235\u022c\3\2\2\2\u0235\u0230")
buf.write("\3\2\2\2\u0236\u00a4\3\2\2\2\u0237\u0239\t\2\2\2\u0238")
buf.write("\u0237\3\2\2\2\u0239\u023a\3\2\2\2\u023a\u0238\3\2\2\2")
buf.write("\u023a\u023b\3\2\2\2\u023b\u00a6\3\2\2\2\u023c\u0240\t")
buf.write("\5\2\2\u023d\u023f\t\6\2\2\u023e\u023d\3\2\2\2\u023f\u0242")
buf.write("\3\2\2\2\u0240\u023e\3\2\2\2\u0240\u0241\3\2\2\2\u0241")
buf.write("\u00a8\3\2\2\2\u0242\u0240\3\2\2\2\u0243\u0244\7\61\2")
buf.write("\2\u0244\u0245\7,\2\2\u0245\u0249\3\2\2\2\u0246\u0248")
buf.write("\13\2\2\2\u0247\u0246\3\2\2\2\u0248\u024b\3\2\2\2\u0249")
buf.write("\u024a\3\2\2\2\u0249\u0247\3\2\2\2\u024a\u024c\3\2\2\2")
buf.write("\u024b\u0249\3\2\2\2\u024c\u024d\7,\2\2\u024d\u024e\7")
buf.write("\61\2\2\u024e\u024f\3\2\2\2\u024f\u0250\bU\2\2\u0250\u00aa")
buf.write("\3\2\2\2\u0251\u0252\7\61\2\2\u0252\u0253\7\61\2\2\u0253")
buf.write("\u0257\3\2\2\2\u0254\u0256\n\7\2\2\u0255\u0254\3\2\2\2")
buf.write("\u0256\u0259\3\2\2\2\u0257\u0255\3\2\2\2\u0257\u0258\3")
buf.write("\2\2\2\u0258\u025a\3\2\2\2\u0259\u0257\3\2\2\2\u025a\u025b")
buf.write("\bV\2\2\u025b\u00ac\3\2\2\2\u025c\u0260\7%\2\2\u025d\u025f")
buf.write("\n\7\2\2\u025e\u025d\3\2\2\2\u025f\u0262\3\2\2\2\u0260")
buf.write("\u025e\3\2\2\2\u0260\u0261\3\2\2\2\u0261\u0263\3\2\2\2")
buf.write("\u0262\u0260\3\2\2\2\u0263\u0264\bW\2\2\u0264\u00ae\3")
buf.write("\2\2\2\u0265\u0266\t\b\2\2\u0266\u0267\3\2\2\2\u0267\u0268")
buf.write("\bX\2\2\u0268\u00b0\3\2\2\2\u0269\u026a\13\2\2\2\u026a")
buf.write("\u00b2\3\2\2\2\22\2\u0171\u0177\u01cb\u0210\u0212\u0218")
buf.write("\u021e\u0226\u022a\u0235\u023a\u0240\u0249\u0257\u0260")
buf.write("\3\b\2\2")
return buf.getvalue()
class jacLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
KW_GRAPH = 8
KW_STRICT = 9
KW_DIGRAPH = 10
KW_SUBGRAPH = 11
KW_NODE = 12
KW_IGNORE = 13
KW_TAKE = 14
KW_SPAWN = 15
KW_WITH = 16
KW_ENTRY = 17
KW_EXIT = 18
KW_LENGTH = 19
KW_KEYS = 20
KW_CONTEXT = 21
KW_INFO = 22
KW_DETAILS = 23
KW_ACTIVITY = 24
COLON = 25
DBL_COLON = 26
COLON_OUT = 27
LBRACE = 28
RBRACE = 29
KW_EDGE = 30
KW_WALKER = 31
SEMI = 32
EQ = 33
PEQ = 34
MEQ = 35
TEQ = 36
DEQ = 37
CPY_EQ = 38
KW_AND = 39
KW_OR = 40
KW_IF = 41
KW_ELIF = 42
KW_ELSE = 43
KW_FOR = 44
KW_TO = 45
KW_BY = 46
KW_WHILE = 47
KW_CONTINUE = 48
KW_BREAK = 49
KW_DISENGAGE = 50
KW_SKIP = 51
KW_REPORT = 52
KW_DESTROY = 53
DEREF = 54
DOT = 55
NOT = 56
EE = 57
LT = 58
GT = 59
LTE = 60
GTE = 61
NE = 62
KW_IN = 63
KW_ANCHOR = 64
KW_HAS = 65
KW_PRIVATE = 66
COMMA = 67
KW_CAN = 68
PLUS = 69
MINUS = 70
MUL = 71
DIV = 72
MOD = 73
POW = 74
LPAREN = 75
RPAREN = 76
LSQUARE = 77
RSQUARE = 78
FLOAT = 79
STRING = 80
BOOL = 81
INT = 82
NAME = 83
COMMENT = 84
LINE_COMMENT = 85
PY_COMMENT = 86
WS = 87
ErrorChar = 88
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'version'", "'-->'", "'->'", "'<--'", "'<-'", "'<-->'", "'--'",
"'graph'", "'strict'", "'digraph'", "'subgraph'", "'node'",
"'ignore'", "'take'", "'spawn'", "'with'", "'entry'", "'exit'",
"'length'", "'keys'", "'context'", "'info'", "'details'", "'activity'",
"':'", "'::'", "'::>'", "'{'", "'}'", "'edge'", "'walker'",
"';'", "'='", "'+='", "'-='", "'*='", "'/='", "':='", "'if'",
"'elif'", "'else'", "'for'", "'to'", "'by'", "'while'", "'continue'",
"'break'", "'disengage'", "'skip'", "'report'", "'destroy'",
"'&'", "'.'", "'=='", "'<'", "'>'", "'<='", "'>='", "'!='",
"'in'", "'anchor'", "'has'", "'private'", "','", "'can'", "'+'",
"'-'", "'*'", "'/'", "'%'", "'^'", "'('", "')'", "'['", "']'" ]
symbolicNames = [ "<INVALID>",
"KW_GRAPH", "KW_STRICT", "KW_DIGRAPH", "KW_SUBGRAPH", "KW_NODE",
"KW_IGNORE", "KW_TAKE", "KW_SPAWN", "KW_WITH", "KW_ENTRY", "KW_EXIT",
"KW_LENGTH", "KW_KEYS", "KW_CONTEXT", "KW_INFO", "KW_DETAILS",
"KW_ACTIVITY", "COLON", "DBL_COLON", "COLON_OUT", "LBRACE",
"RBRACE", "KW_EDGE", "KW_WALKER", "SEMI", "EQ", "PEQ", "MEQ",
"TEQ", "DEQ", "CPY_EQ", "KW_AND", "KW_OR", "KW_IF", "KW_ELIF",
"KW_ELSE", "KW_FOR", "KW_TO", "KW_BY", "KW_WHILE", "KW_CONTINUE",
"KW_BREAK", "KW_DISENGAGE", "KW_SKIP", "KW_REPORT", "KW_DESTROY",
"DEREF", "DOT", "NOT", "EE", "LT", "GT", "LTE", "GTE", "NE",
"KW_IN", "KW_ANCHOR", "KW_HAS", "KW_PRIVATE", "COMMA", "KW_CAN",
"PLUS", "MINUS", "MUL", "DIV", "MOD", "POW", "LPAREN", "RPAREN",
"LSQUARE", "RSQUARE", "FLOAT", "STRING", "BOOL", "INT", "NAME",
"COMMENT", "LINE_COMMENT", "PY_COMMENT", "WS", "ErrorChar" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"KW_GRAPH", "KW_STRICT", "KW_DIGRAPH", "KW_SUBGRAPH",
"KW_NODE", "KW_IGNORE", "KW_TAKE", "KW_SPAWN", "KW_WITH",
"KW_ENTRY", "KW_EXIT", "KW_LENGTH", "KW_KEYS", "KW_CONTEXT",
"KW_INFO", "KW_DETAILS", "KW_ACTIVITY", "COLON", "DBL_COLON",
"COLON_OUT", "LBRACE", "RBRACE", "KW_EDGE", "KW_WALKER",
"SEMI", "EQ", "PEQ", "MEQ", "TEQ", "DEQ", "CPY_EQ", "KW_AND",
"KW_OR", "KW_IF", "KW_ELIF", "KW_ELSE", "KW_FOR", "KW_TO",
"KW_BY", "KW_WHILE", "KW_CONTINUE", "KW_BREAK", "KW_DISENGAGE",
"KW_SKIP", "KW_REPORT", "KW_DESTROY", "DEREF", "DOT",
"NOT", "EE", "LT", "GT", "LTE", "GTE", "NE", "KW_IN",
"KW_ANCHOR", "KW_HAS", "KW_PRIVATE", "COMMA", "KW_CAN",
"PLUS", "MINUS", "MUL", "DIV", "MOD", "POW", "LPAREN",
"RPAREN", "LSQUARE", "RSQUARE", "FLOAT", "STRING", "BOOL",
"INT", "NAME", "COMMENT", "LINE_COMMENT", "PY_COMMENT",
"WS", "ErrorChar" ]
grammarFileName = "jac.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
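A minimal sketch of driving the generated lexer with the ANTLR Python runtime, assuming this module is importable (the sample jac source below is made up for illustration):

from antlr4 import InputStream, CommonTokenStream

lexer = jacLexer(InputStream('walker init { report 42; }'))
stream = CommonTokenStream(lexer)
stream.fill()
for tok in stream.tokens:
    print(tok.type, repr(tok.text))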
| 58.859447
| 103
| 0.558857
| 5,660
| 25,545
| 2.490636
| 0.168198
| 0.139179
| 0.074271
| 0.078882
| 0.319359
| 0.198127
| 0.13705
| 0.117968
| 0.116195
| 0.103426
| 0
| 0.335783
| 0.158505
| 25,545
| 433
| 104
| 58.995381
| 0.320013
| 0.001409
| 0
| 0
| 1
| 0.46988
| 0.619447
| 0.554597
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004819
| false
| 0
| 0.012048
| 0
| 0.253012
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e7eb1cc9b94fe8f97ab365303db26104068e08ec
| 26
|
py
|
Python
|
duplicates/__init__.py
|
akcarsten/duplicates
|
b61b6f4bb562b21b3daf239cb0284b9c131f97c3
|
[
"MIT"
] | 10
|
2021-01-11T14:53:28.000Z
|
2022-03-09T00:57:02.000Z
|
duplicates/__init__.py
|
akcarsten/duplicates
|
b61b6f4bb562b21b3daf239cb0284b9c131f97c3
|
[
"MIT"
] | 1
|
2021-05-05T05:49:02.000Z
|
2021-05-10T03:25:41.000Z
|
duplicates/__init__.py
|
akcarsten/duplicates
|
b61b6f4bb562b21b3daf239cb0284b9c131f97c3
|
[
"MIT"
] | 5
|
2021-06-05T05:30:17.000Z
|
2022-03-20T22:38:53.000Z
|
from .duplicates import *
| 13
| 25
| 0.769231
| 3
| 26
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7fdf24b5ee3c3cbfcf5aa79439f710a5d2929e6
| 41
|
py
|
Python
|
ScanningTools/__init__.py
|
fincardona/ScanningTools.py
|
ecbd07b2535b3388bd66c7f9c738ec5367d1d05a
|
[
"MIT"
] | 1
|
2018-10-09T10:31:56.000Z
|
2018-10-09T10:31:56.000Z
|
ScanningTools/__init__.py
|
fincardona/ScanningTools.py
|
ecbd07b2535b3388bd66c7f9c738ec5367d1d05a
|
[
"MIT"
] | null | null | null |
ScanningTools/__init__.py
|
fincardona/ScanningTools.py
|
ecbd07b2535b3388bd66c7f9c738ec5367d1d05a
|
[
"MIT"
] | null | null | null |
from . import ScanningTools, Quaternions
| 20.5
| 40
| 0.829268
| 4
| 41
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 41
| 1
| 41
| 41
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2b357070580aea5bfdf8d64e159d7282fb3c089
| 23,038
|
py
|
Python
|
src/dataset.py
|
atzberg/gmls-nets
|
d78e5b513b7dda8491f68e11dab730f106f86385
|
[
"BSD-3-Clause"
] | 18
|
2019-09-17T18:58:26.000Z
|
2021-08-05T06:02:16.000Z
|
src/dataset.py
|
atzberg/gmls-nets
|
d78e5b513b7dda8491f68e11dab730f106f86385
|
[
"BSD-3-Clause"
] | null | null | null |
src/dataset.py
|
atzberg/gmls-nets
|
d78e5b513b7dda8491f68e11dab730f106f86385
|
[
"BSD-3-Clause"
] | 4
|
2019-11-16T04:02:07.000Z
|
2021-03-06T11:43:04.000Z
|
"""
Collection of code for generating some training data sets.
"""
# Authors: B.J. Gross and P.J. Atzberger
# Website: http://atzberger.org/
import torch;
import numpy as np;
import pdb;
class diffOp1(torch.utils.data.Dataset):
r"""
Generates samples of the form :math:`(u^{[i]},f^{[i]})` where :math:`f^{[i]} = L[u^{[i]}]`,
where :math:`i` denotes the index of the sample.
Stores data samples in the form :math:`(u,f)`.
The samples of u are represented as a tensor of size [nsamples,nchannels,nx]
and samples of f as a tensor of size [nsamples,nchannels,nx].
Note:
For now, please use an odd nx. This initial implementation uses a
method based on conjugated flips with a formula for the odd case, which is
slightly simpler than the even case.
"""
def flipForFFT(self,u_k_part):
r"""We flip as :math:`f_k = f_{N-k}`. Notice that only :math:`0,\ldots,N-1` entries
stored. This is useful for constructing real-valued function representations
from random coefficients. Real-valued function requires :math:`conj(f_k) = f_{N-k}`.
We can use this flip to construct from random coefficients the term
:math:`u_k = f_k + conj(flip(f_k))`, then above constraint is satisfied.
Args:
a (Tensor): 1d array to flip.
Returns:
Tensor: The flipped tensors symmetric under conjucation.
"""
nx = self.nx;
uu = u_k_part[:,:,nx:0:-1];
vv = u_k_part[:,:,0];
vv = np.expand_dims(vv,2);
uu_k_flip = np.concatenate([vv,uu],2);
return uu_k_flip;
def getComplex(self,a,b):
j = 1j; # imaginary unit; np.complex(0,1) was removed in NumPy >= 1.24.
c = a + j*b;
return c;
def getRealImag(self,c):
a = np.real(c);
b = np.imag(c);
return a,b;
def computeLSymbol_ux(self):
r"""Compute associated Fourier symbols for use under DFT for the operator L[u]."""
nx = self.nx;
vec_k1 = torch.zeros(nx);
vec_k1_pp = torch.zeros(nx);
vec_k_sq = torch.zeros(nx);
L_symbol_real = torch.zeros(nx,dtype=torch.float32);
L_symbol_imag = torch.zeros(nx,dtype=torch.float32);
two_pi = 2.0*np.pi;
#two_pi_i = two_pi*1j; # $2\pi{i}$, 1j = sqrt(-1)
for i in range(0,nx):
vec_k1[i] = i;
if (vec_k1[i] < nx/2):
vec_k1_p = vec_k1[i];
else:
vec_k1_p = vec_k1[i] - nx;
vec_k1_pp[i] = vec_k1_p;
L_symbol_real[i] = 0.0;
L_symbol_imag[i] = two_pi*vec_k1_p;
L_hat = self.getComplex(L_symbol_real.numpy(),L_symbol_imag.numpy());
return L_hat, vec_k1_pp;
def computeLSymbol_uxx(self):
r"""Compute associated Fourier symbols for use under DFT for the operator L[u]."""
nx = self.nx;
vec_k1 = torch.zeros(nx);
vec_k1_pp = torch.zeros(nx);
vec_k_sq = torch.zeros(nx);
L_symbol_real = torch.zeros(nx,dtype=torch.float32);
L_symbol_imag = torch.zeros(nx,dtype=torch.float32);
neg_four_pi_sq = -4.0*np.pi*np.pi;
for i in range(0,nx):
vec_k1[i] = i;
vec_k_sq[i] = vec_k1[i]*vec_k1[i];
if (vec_k1[i] < nx/2):
vec_k1_p = vec_k1[i];
else:
vec_k1_p = vec_k1[i] - nx;
vec_k1_pp[i] = vec_k1_p;
vec_k_p_sq = vec_k1_p*vec_k1_p;
L_symbol_real[i] = neg_four_pi_sq*vec_k_p_sq;
L_symbol_imag[i] = 0.0;
L_hat = self.getComplex(L_symbol_real.numpy(),L_symbol_imag.numpy());
return L_hat, vec_k1_pp;
def computeCoeffActionL(self,u_hat,L_hat):
r"""Computes the action of operator L used for data generation in Fourier space."""
u_k_real, u_k_imag = self.getRealImag(u_hat);
L_symbol_real, L_symbol_imag = self.getRealImag(L_hat);
f_k_real = L_symbol_real*u_k_real - L_symbol_imag*u_k_imag; #broadcast will distr over copies of u.
f_k_imag = L_symbol_real*u_k_imag + L_symbol_imag*u_k_real;
# Generate samples u and f using ifft
f_hat = self.getComplex(f_k_real,f_k_imag);
return f_hat;
def computeActionL(self,u,L_hat):
r"""Computes the action of operator L used for data generation."""
raise Exception('This routine is currently not debugged; it needs testing first.')
# unreachable until the guard above is removed; flag_verbose was undefined here
print("computeActionL(): WARNING: Not yet fully tested.");
# perform FFT to get u_hat
u_hat = np.fft.fft(u);
# compute action of L_hat
f_hat = self.computeCoeffActionL(u_hat,L_hat);
# compute inverse FFT to get f
f = np.fft.ifft(f_hat);
return f;
def __init__(self,op_type='uxx',op_params=None,
gen_mode='exp1',gen_params={'alpha1':0.1},
num_samples=int(1e4),nchannels=1,nx=15,
flag_verbose=0, **extra_params):
r"""Setup for data generation.
Args:
op_type (str): The differential operator to sample.
op_params (dict): The operator parameters.
gen_mode (str): The mode for the data generator.
gen_params (dict): The parameters for the given generator.
num_samples (int): The number of samples to generate.
nchannels (int): The number of channels.
nx (int): The number of input sample points.
flag_verbose (int): Level of reporting during calculations.
extra_params (dict): Extra parameters for the sampler.
For extra_params we have:
noise_factor (float): The amount of noise to add to samples.
scale_factor (float): A factor to scale magnitude of the samples.
flagComputeL (bool): Whether the Fourier symbol of the operator should be computed.
For generator modes we have:
gen_mode == 'exp1':
alpha1 (float): The decay rate.
Note:
For now, please use only an odd nx. This initial implementation uses a
method based on conjugated flips with a formula for the odd case, which is
slightly simpler than the even case.
"""
super(diffOp1, self).__init__();
if flag_verbose > 0:
print("Generating the data samples which can take some time.");
print("num_samples = %d"%num_samples);
self.op_type=op_type;
self.op_params=op_params;
self.gen_mode=gen_mode;
self.gen_params=gen_params;
self.num_samples=num_samples;
self.nchannels=nchannels;
self.nx=nx;
if (nx % 2 == 0):
msg = "Not allowed yet to use nx that is even. ";
msg += "For now, please just use nx that is odd given the flips currently used."
raise Exception(msg);
noise_factor=0;scale_factor=1.0;flagComputeL=False; # default values
if 'noise_factor' in extra_params:
noise_factor = extra_params['noise_factor'];
if 'scale_factor' in extra_params:
scale_factor = extra_params['scale_factor'];
if 'flagComputeL' in extra_params:
flagComputeL = extra_params['flagComputeL'];
# Generate for the operator the Fourier symbols
if self.op_type == 'ux' or self.op_type == 'u*ux' or self.op_type == 'ux*ux':
L_hat, vec_k1_pp = self.computeLSymbol_ux();
elif self.op_type == 'uxx' or self.op_type == 'u*uxx' or self.op_type == 'uxx*uxx':
L_hat, vec_k1_pp = self.computeLSymbol_uxx();
else:
raise Exception("Unkonwn operator type.");
if (flagComputeL):
L_i = np.fft.ifft(L_hat);
self.L_hat = L_hat;
self.L_i = L_i;
u = np.zeros(nx);
i0 = int(nx/2);
u[i0] = 1.0;
self.G_i = self.computeActionL(u, L_hat);
# Generate random input function (want real-valued)
# conj(u_k) = u_{N -k} needs to hold.
u_k_real = np.random.randn(num_samples,nchannels,nx);
u_k_imag = np.random.randn(num_samples,nchannels,nx);
# scale modes to make smooth
if gen_mode=='exp1':
alpha1 = gen_params['alpha1'];
factor_k = scale_factor*np.exp(-alpha1*vec_k1_pp**2);
factor_k = factor_k.numpy();
else:
raise Exception("Generation mode not recognized.");
u_k_real = u_k_real*factor_k; # broadcast will apply over last two dimensions
u_k_imag = u_k_imag*factor_k; # broadcast will apply over last two dimensions
flag_debug = False;
if flag_debug:
if flag_verbose > 0:
print("WARNING: debugging mode on.");
u_k_real = 0.0*u_k_real;
u_k_imag = 0.0*u_k_imag;
u_k_real[0,0,1] = nx;
u_k_imag[0,0,1] = 0;
u_k_real[1,0,1] = 0;
u_k_imag[1,0,1] = nx;
u_k_real[2,0,1] = nx;
u_k_imag[2,0,1] = nx;
# flip modes for constructing rep of real-valued function
u_k_real_flip = self.flipForFFT(u_k_real);
u_k_imag_flip = self.flipForFFT(u_k_imag);
u_k_real_p = 0.5*u_k_real + 0.5*u_k_real_flip; # make conjugate conj(u_k) = u_{N -k}
u_k_imag_p = 0.5*u_k_imag - 0.5*u_k_imag_flip; # make conjugate conj(u_k) = u_{N -k}
u_k_real_p = torch.from_numpy(u_k_real_p);
u_k_imag_p = torch.from_numpy(u_k_imag_p);
u_k_real_p = u_k_real_p.type(torch.float32);
u_k_imag_p = u_k_imag_p.type(torch.float32);
u_hat = self.getComplex(u_k_real_p.numpy(),u_k_imag_p.numpy());
f_hat = self.computeCoeffActionL(u_hat,L_hat);
f_hat = f_hat; # target operator relation for PDEs later is Lu = -f, so f = -Lu.
# Generate samples u and f, in 2d using ifft2.
# ifft2 is broadcast over last two indices
# perform inverse DFT to get u and f.
u_i = np.fft.ifft(u_hat);
f_i = np.fft.ifft(f_hat);
if self.op_type == 'u*ux':
f_i = u_i*f_i;
elif self.op_type == 'ux*ux':
f_i = f_i*f_i;
elif self.op_type == 'u*uxx':
f_i = u_i*f_i;
elif self.op_type == 'uxx*uxx':
f_i = f_i*f_i;
self.samples_X = torch.from_numpy(np.real(u_i)).type(torch.float32); # only grab real part
self.samples_Y = torch.from_numpy(np.real(f_i)).type(torch.float32);
if noise_factor > 0:
self.samples_Y += noise_factor*torch.randn(*self.samples_Y.shape);
def __len__(self):
return self.samples_X.size()[0];
def __getitem__(self,index):
return self.samples_X[index],self.samples_Y[index];
def to(self,device):
self.samples_X = self.samples_X.to(device);
self.samples_Y = self.samples_Y.to(device);
return self;
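# --- Hypothetical usage sketch (not part of the original source): a 1d data
# set for the second-derivative operator could be created and batched as
#
#     ds = diffOp1(op_type='uxx', num_samples=100, nx=15)
#     loader = torch.utils.data.DataLoader(ds, batch_size=10)
#     u, f = next(iter(loader))  # u, f each of shape [10, 1, 15], with f = L[u]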
class diffOp2(torch.utils.data.Dataset):
r"""
Generates samples of the form :math:`(u^{[i]},f^{[i]})` where :math:`f^{[i]} = L[u^{[i]}]`,
where :math:`i` denotes the index of the sample.
Stores data samples in the form :math:`(u,f)`.
The samples of u are represented as a tensor of size [nsamples,nchannels,nx,ny]
and samples of f as a tensor of size [nsamples,nchannels,nx,ny].
Note:
For now, please use odd nx and ny. This initial implementation uses a
method based on conjugated flips with a formula for the odd case, which is
slightly simpler than the even case.
"""
def flipForFFT(self,u_k_part):
r"""We flip as :math:`f_k = f_{N-k}`. Notice that only :math:`0,\ldots,N-1` entries
stored. This is useful for constructing real-valued function representations
from random coefficients. Real-valued function requires :math:`conj(f_k) = f_{N-k}`.
We can use this flip to construct from random coefficients the term
:math:`u_k = f_k + conj(flip(f_k))`, then above constraint is satisfied.
Args:
a (Tensor): 1d array to flip.
Returns:
Tensor: The flipped tensors symmetric under conjucation.
"""
nx = self.nx;ny = self.ny;
u_k_part_row0 = u_k_part[:,:,0,:];
u_k_part_row0 = np.expand_dims(u_k_part_row0,2);
u_k_part_ex = np.concatenate([u_k_part,u_k_part_row0],2);
u_k_part_col0 = u_k_part_ex[:,:,:,0];
u_k_part_col0 = np.expand_dims(u_k_part_col0,3);
u_k_part_ex = np.concatenate([u_k_part_ex,u_k_part_col0],3);
u_k_part_ex_flip = np.flip(u_k_part_ex,2);
u_k_part_ex_flip = np.flip(u_k_part_ex_flip,3);
u_k_part_flip = np.delete(u_k_part_ex_flip,nx,2);
u_k_part_flip = np.delete(u_k_part_flip,ny,3);
return u_k_part_flip;
def getComplex(self,a,b):
j = 1j; # imaginary unit; np.complex(0,1) was removed in NumPy >= 1.24.
c = a + j*b;
return c;
def getRealImag(self,c):
a = np.real(c);
b = np.imag(c);
return a,b;
def computeLSymbol_laplacian_u(self):
r"""Compute associated Fourier symbols for use under DFT for the operator L[u]."""
num_dim = 1;nx=self.nx;ny=self.ny;
vec_k1 = torch.zeros(nx,ny);
vec_k2 = torch.zeros(nx,ny);
vec_k1_pp = torch.zeros(nx,ny);
vec_k2_pp = torch.zeros(nx,ny);
vec_k_sq = torch.zeros(nx,ny);
L_symbol_real = torch.zeros(nx,ny,dtype=torch.float32);
L_symbol_imag = torch.zeros(nx,ny,dtype=torch.float32);
neg_four_pi_sq = -4.0*np.pi*np.pi;
for i in range(0,nx):
for j in range(0,ny):
vec_k1[i,j] = i;
vec_k2[i,j] = j;
vec_k_sq[i,j] = vec_k1[i,j]*vec_k1[i,j] + vec_k2[i,j]*vec_k2[i,j];
if (vec_k1[i,j] < nx/2):
vec_k1_p = vec_k1[i,j];
else:
vec_k1_p = vec_k1[i,j] - nx;
if (vec_k2[i,j] < ny/2):
vec_k2_p = vec_k2[i,j];
else:
vec_k2_p = vec_k2[i,j] - ny;
vec_k1_pp[i,j] = vec_k1_p;
vec_k2_pp[i,j] = vec_k2_p;
vec_k_p_sq = vec_k1_p*vec_k1_p + vec_k2_p*vec_k2_p;
L_symbol_real[i,j] = neg_four_pi_sq*vec_k_p_sq;
L_symbol_imag[i,j] = 0.0;
L_hat = self.getComplex(L_symbol_real.numpy(),L_symbol_imag.numpy());
return L_hat, vec_k1_pp, vec_k2_pp;
def computeLSymbol_grad_u(self):
r"""Compute associated Fourier symbols for use under DFT for the operator L[u]."""
num_dim = 2;nx=self.nx;ny=self.ny;
vec_k1 = torch.zeros(nx,ny);
vec_k2 = torch.zeros(nx,ny);
vec_k1_pp = torch.zeros(nx,ny);
vec_k2_pp = torch.zeros(nx,ny);
vec_k_sq = torch.zeros(nx,ny);
L_symbol_real = torch.zeros(num_dim,nx,ny,dtype=torch.float32);
L_symbol_imag = torch.zeros(num_dim,nx,ny,dtype=torch.float32);
two_pi = 2.0*np.pi;
#two_pi_i = two_pi*1j; # $2\pi{i}$, 1j = sqrt(-1)
for i in range(0,nx):
for j in range(0,ny):
vec_k1[i,j] = i;
vec_k2[i,j] = j;
vec_k_sq[i,j] = vec_k1[i,j]*vec_k1[i,j] + vec_k2[i,j]*vec_k2[i,j];
if (vec_k1[i,j] < nx/2):
vec_k1_p = vec_k1[i,j];
else:
vec_k1_p = vec_k1[i,j] - nx;
if (vec_k2[i,j] < ny/2):
vec_k2_p = vec_k2[i,j];
else:
vec_k2_p = vec_k2[i,j] - ny;
vec_k1_pp[i,j] = vec_k1_p;
vec_k2_pp[i,j] = vec_k2_p;
vec_k_p_sq = vec_k1_p*vec_k1_p + vec_k2_p*vec_k2_p;
L_symbol_real[0,i,j] = 0.0;
L_symbol_imag[0,i,j] = two_pi*vec_k1_p;
L_symbol_real[1,i,j] = 0.0;
L_symbol_imag[1,i,j] = two_pi*vec_k2_p;
L_hat_0 = self.getComplex(L_symbol_real[0,:,:].numpy(),L_symbol_imag[0,:,:].numpy());
L_hat_1 = self.getComplex(L_symbol_real[1,:,:].numpy(),L_symbol_imag[1,:,:].numpy());
L_hat = np.stack((L_hat_0,L_hat_1));
return L_hat, vec_k1_pp, vec_k2_pp;
def computeCoeffActionL(self,u_hat,L_hat):
r"""Computes the action of operator L used for data generation in Fourier space."""
u_k_real, u_k_imag = self.getRealImag(u_hat);
L_symbol_real, L_symbol_imag = self.getRealImag(L_hat);
f_k_real = L_symbol_real*u_k_real - L_symbol_imag*u_k_imag; #broadcast will distr over copies of u.
#f_k_real = -1.0*f_k_real;
f_k_imag = L_symbol_real*u_k_imag + L_symbol_imag*u_k_real;
#f_k_imag = -1.0*f_k_imag;
# Generate samples u and f using ifft2.
f_hat = self.getComplex(f_k_real,f_k_imag);
return f_hat;
def computeActionL(self,u,L_hat):
r"""Computes the action of operator L used for data generation."""
raise Exception('This routine is currently not debugged; it needs testing first.')
# perform FFT to get u_hat
u_hat = np.fft.fft2(u);
# compute action of L_hat
f_hat = self.computeCoeffActionL(u_hat,L_hat);
# compute inverse FFT to get f
f = np.fft.ifft2(f_hat)
return f;
def __init__(self,op_type=r'\Delta{u}',op_params=None,
gen_mode='exp1',gen_params={'alpha1':0.1},
num_samples=int(1e4),nchannels=1,nx=15,ny=15,
flag_verbose=0, **extra_params):
r"""Setup for data generation.
Args:
op_type (str): The differential operator to sample.
op_params (dict): The operator parameters.
gen_mode (str): The mode for the data generator.
gen_params (dict): The parameters for the given generator.
num_samples (int): The number of samples to generate.
nchannels (int): The number of channels.
nx (int): The number of input sample points in the x-direction.
ny (int): The number of input sample points in the y-direction.
flag_verbose (int): Level of reporting during calculations.
extra_params (dict): Extra parameters for the sampler.
For extra_params we have:
noise_factor (float): The amount of noise to add to samples.
scale_factor (float): A factor to scale magnitude of the samples.
flagComputeL (bool): Whether the Fourier symbol of the operator should be computed.
For generator modes we have:
gen_mode == 'exp1':
alpha1 (float): The decay rate.
Note:
For now, please use only odd nx and ny. This initial implementation uses a
method based on conjugated flips with a formula for the odd case, which is
slightly simpler than the even case.
"""
super(diffOp2, self).__init__(); # was missing here, unlike in diffOp1
if flag_verbose > 0:
print("Generating the data samples which can take some time.");
print("num_samples = %d"%num_samples);
self.op_type=op_type;
self.op_params=op_params;
self.gen_mode=gen_mode;
self.gen_params=gen_params;
self.num_samples=num_samples;
self.nchannels=nchannels;
self.nx=nx; self.ny=ny;
if (nx % 2 == 0) or (ny % 2 == 0) or (nx != ny): # may be able to relax nx != ny (just for safety)
msg = "Not allowed yet to use nx,ny that are even or unequal. ";
msg += "For now, please just use nx,ny that is odd given the flips currently used."
raise Exception(msg);
noise_factor=0;scale_factor=1.0;flagComputeL=False; # default values
if 'noise_factor' in extra_params:
noise_factor = extra_params['noise_factor'];
if 'scale_factor' in extra_params:
scale_factor = extra_params['scale_factor'];
if 'flagComputeL' in extra_params:
flagComputeL = extra_params['flagComputeL'];
# Generate for the operator the Fourier symbols
flag_vv = 'null';
if self.op_type == r'\grad{u}' or self.op_type == r'u\grad{u}' or self.op_type == r'\grad{u}\cdot\grad{u}':
L_hat, vec_k1_pp, vec_k2_pp = self.computeLSymbol_grad_u();
flag_vv = 'vector2';
elif self.op_type == r'\Delta{u}' or self.op_type == r'u\Delta{u}' or self.op_type == r'\Delta{u}*\Delta{u}':
L_hat, vec_k1_pp, vec_k2_pp = self.computeLSymbol_laplacian_u();
flag_vv = 'scalar';
else:
raise Exception("Unknown operator type.");
if (flagComputeL):
raise Exception("Currently not yet supported, the flagComputeL.");
L_i = np.fft.ifft2(L_hat);
self.L_hat = L_hat;
self.L_i = L_i;
u = np.zeros((nx,ny));
i0 = int(nx/2);
j0 = int(ny/2);
u[i0,j0] = 1.0;
self.G_i = self.computeActionL(u, L_hat);
# Generate random input function (want real-valued)
# conj(u_k) = u_{N -k} needs to hold.
u_k_real = np.random.randn(num_samples,nchannels,nx,ny);
u_k_imag = np.random.randn(num_samples,nchannels,nx,ny);
# scale modes to make smooth
if gen_mode=='exp1':
alpha1 = gen_params['alpha1'];
factor_k = scale_factor*np.exp(-alpha1*(vec_k1_pp**2 + vec_k2_pp**2));
factor_k = factor_k.numpy();
else:
raise Exception("Generation mode not recognized.");
u_k_real = u_k_real*factor_k; # broadcast will apply over last two dimensions
u_k_imag = u_k_imag*factor_k; # broadcast will apply over last two dimensions
# flip modes for constructing rep of real-valued function
u_k_real_flip = self.flipForFFT(u_k_real);
u_k_imag_flip = self.flipForFFT(u_k_imag);
u_k_real = 0.5*u_k_real + 0.5*u_k_real_flip; # make conjugate conj(u_k) = u_{N -k}
u_k_imag = 0.5*u_k_imag - 0.5*u_k_imag_flip; # make conjugate conj(u_k) = u_{N -k}
u_k_real = torch.from_numpy(u_k_real);
u_k_imag = torch.from_numpy(u_k_imag);
u_k_real = u_k_real.type(torch.float32);
u_k_imag = u_k_imag.type(torch.float32);
u_hat = self.getComplex(u_k_real.numpy(),u_k_imag.numpy());
if flag_vv == 'scalar':
f_hat = self.computeCoeffActionL(u_hat,L_hat);
elif flag_vv == 'vector2':
f_hat_0 = self.computeCoeffActionL(u_hat,L_hat[0,:,:]);
f_hat_1 = self.computeCoeffActionL(u_hat,L_hat[1,:,:]);
f_hat = np.concatenate((f_hat_0,f_hat_1),-3);
else:
raise Exception("Unkonwn operator type.");
# Generate samples u and f using ifft2.
# ifft2 is broadcast over last two indices
# perform inverse DFT to get u and f
u_i = np.fft.ifft2(u_hat);
if flag_vv == 'scalar':
f_i = np.fft.ifft2(f_hat);
elif flag_vv == 'vector2':
f_i_0 = np.fft.ifft2(f_hat[:,0,:,:]);
f_i_1 = np.fft.ifft2(f_hat[:,1,:,:]);
f_i = np.stack((f_i_0,f_i_1),-3);
else:
raise Exception("Unkonwn operator type.");
if self.op_type == r'\grad{u}':
f_i = f_i; # nothing to do.
elif self.op_type == r'u\grad{u}':
f_i = u_i*f_i; # matches up by broadcast rules
elif self.op_type == r'\grad{u}\cdot\grad{u}':
f_i = np.sum(f_i**2,1); # sum on axis for channels, [batch,channel,nx,ny].
f_i = np.expand_dims(f_i,1); # keep in form [batch,1,nx,ny]
elif self.op_type == r'\Delta{u}':
f_i = f_i; # nothing to do.
elif self.op_type == r'u\Delta{u}':
f_i = u_i*f_i;
elif self.op_type == r'\Delta{u}*\Delta{u}':
f_i = f_i**2;
else:
raise Exception("Unkonwn operator type.");
self.samples_X = torch.from_numpy(np.real(u_i)).type(torch.float32); # only grab real part
self.samples_Y = torch.from_numpy(np.real(f_i)).type(torch.float32);
if noise_factor > 0:
self.samples_Y += noise_factor*torch.randn(*self.samples_Y.shape);
def __len__(self):
return self.samples_X.size()[0];
def __getitem__(self,index):
return self.samples_X[index],self.samples_Y[index];
def to(self,device):
self.samples_X = self.samples_X.to(device);
self.samples_Y = self.samples_Y.to(device);
return self;
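if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original source): generate a
    # small 2d Laplacian data set and inspect one (u, f) sample.
    ds = diffOp2(op_type=r'\Delta{u}', num_samples=4, nx=15, ny=15)
    u, f = ds[0]
    print(u.shape, f.shape)  # expected: torch.Size([1, 15, 15]) for both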
| 36.510301
| 113
| 0.625358
| 3,883
| 23,038
| 3.465619
| 0.084213
| 0.0162
| 0.016497
| 0.010701
| 0.911124
| 0.876644
| 0.843056
| 0.80865
| 0.790072
| 0.771717
| 0
| 0.021344
| 0.249588
| 23,038
| 630
| 114
| 36.568254
| 0.757057
| 0.298203
| 0
| 0.604113
| 0
| 0
| 0.078749
| 0.002709
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056555
| false
| 0
| 0.007712
| 0.010283
| 0.120823
| 0.015424
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6534aa15fe9e54affb901ef2b18a9190fe51c44f
| 224
|
py
|
Python
|
blurr/data/all.py
|
HenryDashwood/blurr
|
dececd4e706129694b25ee80c15dfb61ffadf9a9
|
[
"Apache-2.0"
] | null | null | null |
blurr/data/all.py
|
HenryDashwood/blurr
|
dececd4e706129694b25ee80c15dfb61ffadf9a9
|
[
"Apache-2.0"
] | null | null | null |
blurr/data/all.py
|
HenryDashwood/blurr
|
dececd4e706129694b25ee80c15dfb61ffadf9a9
|
[
"Apache-2.0"
] | null | null | null |
from ..utils import *
from .core import *
from .question_answering import *
from .token_classification import *
from .text2text.core import *
from .text2text.language_modeling import *
from .text2text.summarization import *
| 28
| 42
| 0.794643
| 27
| 224
| 6.481481
| 0.444444
| 0.342857
| 0.325714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015306
| 0.125
| 224
| 7
| 43
| 32
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6545f00275d150f756fbd1778003e8a75c79ccd9
| 6,497
|
py
|
Python
|
tests/test_inventory_set.py
|
qhyou11/bridgy
|
887ff2b41d0d04ad469fb4838191a4a578597bdd
|
[
"MIT"
] | 402
|
2017-08-09T19:27:49.000Z
|
2022-03-22T10:24:00.000Z
|
tests/test_inventory_set.py
|
qhyou11/bridgy
|
887ff2b41d0d04ad469fb4838191a4a578597bdd
|
[
"MIT"
] | 33
|
2017-08-03T23:10:44.000Z
|
2020-12-16T03:29:33.000Z
|
tests/test_inventory_set.py
|
qhyou11/bridgy
|
887ff2b41d0d04ad469fb4838191a4a578597bdd
|
[
"MIT"
] | 58
|
2017-11-19T21:22:45.000Z
|
2022-03-22T09:25:04.000Z
|
import os
import mock
import pytest
import bridgy.inventory
from bridgy.inventory import InventorySet, Instance
from bridgy.inventory.aws import AwsInventory
from bridgy.config import Config
def get_aws_inventory(name):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
aws_obj = AwsInventory(name=name, cache_dir=cache_dir, access_key_id='access_key_id',
secret_access_key='secret_access_key', session_token='session_token',
region='region')
return aws_obj
def test_inventory_set(mocker):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
aws_obj = get_aws_inventory(name='aws')
inventorySet = InventorySet()
inventorySet.add(aws_obj)
inventorySet.add(aws_obj)
print(aws_obj.instances())
all_instances = inventorySet.instances()
aws_instances = [
Instance(name=u'test-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-8-185.us-west-2.compute.internal', u'i-e54cbaeb'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-138.us-west-2.compute.internal', u'i-f7d726f9'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-account-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-139.us-west-2.compute.internal', u'i-f4d726fa'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-142.us-west-2.compute.internal', u'i-f5d726fb'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-140.us-west-2.compute.internal', u'i-f2d726fc'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-141.us-west-2.compute.internal', u'i-f3d726fd'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-38.us-west-2.compute.internal', u'i-0f500447384e95942'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-39.us-west-2.compute.internal', u'i-0f500447384e95943'), source='aws (aws)', container_id=None, type='VM')
]
expected_instances = aws_instances + aws_instances
assert len(all_instances) == len(expected_instances)
assert set(all_instances) == set(expected_instances)
def test_inventory_set_filter_sources(mocker):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
inventorySet = InventorySet()
inventorySet.add(get_aws_inventory(name='aws'))
inventorySet.add(get_aws_inventory(name='awesome'))
print(inventorySet.instances())
all_instances = inventorySet.instances(filter_sources='awesome')
# aws_instances = [
# Instance(name='test-forms', address='devbox', aliases=('devbox', 'ip-172-31-8-185.us-west-2.compute.internal', 'i-e54cbaeb'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-forms', address='devbox', aliases=('devbox', 'ip-172-31-0-138.us-west-2.compute.internal', 'i-f7d726f9'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-account-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-139.us-west-2.compute.internal', 'i-f4d726fa'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-0-142.us-west-2.compute.internal', 'i-f5d726fb'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-140.us-west-2.compute.internal', 'i-f2d726fc'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-141.us-west-2.compute.internal', 'i-f3d726fd'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-38.us-west-2.compute.internal', 'i-0f500447384e95942'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-39.us-west-2.compute.internal', 'i-0f500447384e95943'), source='aws (aws)', container_id=None, type='VM')
# ]
awesome_instances = [
Instance(name='test-forms', address='devbox', aliases=('devbox', 'ip-172-31-8-185.us-west-2.compute.internal', 'i-e54cbaeb'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-forms', address='devbox', aliases=('devbox', 'ip-172-31-0-138.us-west-2.compute.internal', 'i-f7d726f9'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-account-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-139.us-west-2.compute.internal', 'i-f4d726fa'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-0-142.us-west-2.compute.internal', 'i-f5d726fb'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-140.us-west-2.compute.internal', 'i-f2d726fc'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-141.us-west-2.compute.internal', 'i-f3d726fd'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-38.us-west-2.compute.internal', 'i-0f500447384e95942'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-39.us-west-2.compute.internal', 'i-0f500447384e95943'), source='awesome (aws)', container_id=None, type='VM')
]
assert len(all_instances) == len(awesome_instances)
assert set(all_instances) == set(awesome_instances)
all_instances = inventorySet.instances(filter_sources='bogus')
assert len(all_instances) == 0
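# --- Hypothetical invocation note (not part of the original source): these
# tests take the 'mocker' fixture from pytest-mock and read stub data from
# tests/aws_stubs, so they would typically be run with
#
#     pytest tests/test_inventory_set.py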
| 75.546512
| 199
| 0.68447
| 951
| 6,497
| 4.570978
| 0.092534
| 0.066253
| 0.038647
| 0.077295
| 0.851392
| 0.830688
| 0.79135
| 0.764205
| 0.757074
| 0.757074
| 0
| 0.068737
| 0.117747
| 6,497
| 86
| 200
| 75.546512
| 0.689637
| 0.232415
| 0
| 0.172414
| 0
| 0
| 0.314848
| 0.134217
| 0
| 0
| 0
| 0
| 0.086207
| 1
| 0.051724
| false
| 0
| 0.12069
| 0
| 0.189655
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8dae38169e57058281461b6150c04aef43d8d21
| 1,285
|
py
|
Python
|
challenge-20/test_solver.py
|
mauricioklein/algorithm-exercises
|
1be95762d000102795059255a0a0d2d21d4b67fc
|
[
"MIT"
] | 3
|
2019-12-03T11:40:36.000Z
|
2020-06-28T19:39:51.000Z
|
challenge-20/test_solver.py
|
mauricioklein/algorithm-exercises
|
1be95762d000102795059255a0a0d2d21d4b67fc
|
[
"MIT"
] | null | null | null |
challenge-20/test_solver.py
|
mauricioklein/algorithm-exercises
|
1be95762d000102795059255a0a0d2d21d4b67fc
|
[
"MIT"
] | null | null | null |
import unittest
from solver import ListNode
class TestSolver(unittest.TestCase):
def test_solver_iteratively(self):
# Create nodes
nodes = [
ListNode(4),
ListNode(3),
ListNode(2),
ListNode(1)
]
# Chain the nodes
for i in range(len(nodes)-1):
nodes[i].next = nodes[i+1]
nodes[len(nodes)-1].next = None
# Reverse the list
head = nodes[0]
head.reverseIteratively(head)
# Verify
node = nodes[len(nodes)-1]
for i in range(1,5):
self.assertEqual(node.val, i)
node = node.next
def test_solver_recursively(self):
# Create nodes
nodes = [
ListNode(4),
ListNode(3),
ListNode(2),
ListNode(1)
]
# Chain the nodes
for i in range(len(nodes)-1):
nodes[i].next = nodes[i+1]
nodes[len(nodes)-1].next = None
# Reverse the list
head = nodes[0]
head.reverseRecursively(head)
# Verify
node = nodes[len(nodes)-1]
for i in range(1,5):
self.assertEqual(node.val, i)
node = node.next
if __name__ == "__main__":
unittest.main()
| 23.363636
| 41
| 0.506615
| 149
| 1,285
| 4.288591
| 0.281879
| 0.075117
| 0.084507
| 0.068858
| 0.726135
| 0.726135
| 0.726135
| 0.726135
| 0.726135
| 0.726135
| 0
| 0.027813
| 0.384436
| 1,285
| 54
| 42
| 23.796296
| 0.780025
| 0.081712
| 0
| 0.702703
| 0
| 0
| 0.006832
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3358dbdb86f277d5e708f3ec2c05b724de337198
| 19,413
|
py
|
Python
|
pyvkfft/fft.py
|
isuruf/pyvkfft
|
1cb234c55b9af6b5fd85fc2082572d428819779b
|
[
"MIT"
] | null | null | null |
pyvkfft/fft.py
|
isuruf/pyvkfft
|
1cb234c55b9af6b5fd85fc2082572d428819779b
|
[
"MIT"
] | null | null | null |
pyvkfft/fft.py
|
isuruf/pyvkfft
|
1cb234c55b9af6b5fd85fc2082572d428819779b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PyVkFFT
# (c) 2021- : ESRF-European Synchrotron Radiation Facility
# authors:
# Vincent Favre-Nicolin, favre@esrf.fr
__all__ = ['fftn', 'ifftn', 'rfftn', 'irfftn', 'vkfft_version', 'clear_vkfftapp_cache',
'has_pycuda', 'has_opencl', 'has_cupy']
from enum import Enum
from functools import lru_cache
import numpy as np
from .base import complex32
from .config import FFT_CACHE_NB
try:
from .cuda import VkFFTApp as VkFFTApp_cuda, has_pycuda, has_cupy, vkfft_version
if has_pycuda:
import pycuda.gpuarray as cua
if has_cupy:
import cupy as cp
except ImportError:
has_cupy, has_pycuda = False, False
try:
from .opencl import VkFFTApp as VkFFTApp_cl, cla, vkfft_version
has_opencl = True
except ImportError:
has_opencl = False
class Backend(Enum):
""" Backend language & library"""
UNKNOWN = 0
PYCUDA = 1
PYOPENCL = 2
CUPY = 3
def _prepare_transform(src, dest, cl_queue, r2c=False):
"""
Determine the backend from the input data.
Create the destination array if necessary.
:param src: the source GPU array
:param dest: the destination array. If None, a new GPU array is created.
:param cl_queue: the opencl queue to use, or None
:param r2c: if True, this is for an R2C transform, so adapt the destination
array accordingly.
:return: a tuple (backend, inplace, dest, cl_queue), also appending the
destination dtype for an r2c transform.
"""
backend = Backend.UNKNOWN
if r2c:
if src.dtype in [np.float16, np.float32, np.float64]:
sh = list(src.shape)
sh[-1] = sh[-1] // 2 + 1
dtype = np.complex64
if src.dtype == np.float16:
dtype = complex32
elif src.dtype == np.float64:
dtype = np.complex128
else:
sh = list(src.shape)
sh[-1] = (sh[-1] - 1) * 2
dtype = np.float32
if src.dtype == complex32:
dtype = np.float16
elif src.dtype == np.complex128:
dtype = np.float64
else:
sh, dtype = None, None
if has_pycuda:
if isinstance(src, cua.GPUArray):
backend = Backend.PYCUDA
# Must cast the gpudata to int as it can either be a DeviceAllocation object
# or an int (e.g. when using a view of another array)
src_ptr = int(src.gpudata)
if dest is None:
if r2c:
dest = cua.empty(tuple(sh), dtype=dtype, allocator=src.allocator)
else:
dest = cua.empty_like(src)
dest_ptr = int(dest.gpudata)
if backend == Backend.UNKNOWN and has_opencl:
if isinstance(src, cla.Array):
backend = Backend.PYOPENCL
src_ptr = src.data.int_ptr
if dest is None:
if r2c:
dest = cla.empty(src.queue, tuple(sh), dtype=dtype, allocator=src.allocator)
else:
dest = cla.empty_like(src)
dest_ptr = dest.data.int_ptr
if cl_queue is None:
cl_queue = src.queue
if backend == Backend.UNKNOWN and has_cupy:
if isinstance(src, cp.ndarray):
backend = Backend.CUPY
src_ptr = src.__cuda_array_interface__['data'][0]
if dest is None:
if r2c:
dest = cp.empty(tuple(sh), dtype=dtype)
else:
dest = cp.empty_like(src)
dest_ptr = dest.__cuda_array_interface__['data'][0]
if backend == Backend.UNKNOWN:
raise RuntimeError("Could note determine the type of GPU array supplied, or the "
"corresponding backend is not installed "
"(has_pycuda=%d, has_pyopencl=%d, has_cupy=%d)" %
(has_pycuda, has_opencl, has_cupy))
inplace = dest_ptr == src_ptr
if r2c:
if inplace:
dest = src.view(dtype=dtype)
return backend, inplace, dest, cl_queue, dtype
else:
return backend, inplace, dest, cl_queue
@lru_cache(maxsize=FFT_CACHE_NB)
def _get_fft_app(backend, shape, dtype, inplace, ndim, axes, norm, cuda_stream, cl_queue):
if backend in [Backend.PYCUDA, Backend.CUPY]:
return VkFFTApp_cuda(shape, dtype, ndim=ndim, inplace=inplace,
stream=cuda_stream, norm=norm, axes=axes)
elif backend == Backend.PYOPENCL:
return VkFFTApp_cl(shape, dtype, cl_queue, ndim=ndim, inplace=inplace,
norm=norm, axes=axes)
@lru_cache(maxsize=FFT_CACHE_NB)
def _get_rfft_app(backend, shape, dtype, inplace, ndim, norm, cuda_stream, cl_queue):
if backend in [Backend.PYCUDA, Backend.CUPY]:
return VkFFTApp_cuda(shape, dtype, ndim=ndim, inplace=inplace,
stream=cuda_stream, norm=norm, r2c=True)
elif backend == Backend.PYOPENCL:
return VkFFTApp_cl(shape, dtype, cl_queue, ndim=ndim, inplace=inplace,
norm=norm, r2c=True)
@lru_cache(maxsize=FFT_CACHE_NB)
def _get_dct_app(backend, shape, dtype, inplace, ndim, norm, dct_type,
cuda_stream, cl_queue):
if backend in [Backend.PYCUDA, Backend.CUPY]:
return VkFFTApp_cuda(shape, dtype, ndim=ndim, inplace=inplace,
stream=cuda_stream, norm=norm, dct=dct_type)
elif backend == Backend.PYOPENCL:
return VkFFTApp_cl(shape, dtype, cl_queue, ndim=ndim, inplace=inplace,
norm=norm, dct=dct_type)
def fftn(src, dest=None, ndim=None, norm=1, axes=None, cuda_stream=None, cl_queue=None,
return_scale=False):
"""
Perform an FFT on a GPU array, automatically creating the VkFFTApp
and caching it for future re-use.
:param src: the source pycuda.gpuarray.GPUArray or cupy.ndarray
:param dest: the destination GPU array. If None, a new GPU array will
be created and returned (using the source array allocator
(pycuda, pyopencl) if available).
If dest is the same array as src, an inplace transform is done.
:param ndim: the number of dimensions (<=3) to use for the FFT. By default,
uses the array dimensions. Can be smaller, e.g. ndim=2 for a 3D
array to perform a batched 3D FFT on all the layers. The FFT
is always performed along the last axes if the array's number
of dimensions is larger than ndim, i.e. on the x-axis for ndim=1,
on the x and y axes for ndim=2.
:param norm: if 0 (un-normalised), every transform multiplies the L2 norm
of the array by the transform size.
if 1 (the default) or "backward", the inverse transform divides the
L2 norm by the array size, so FFT+iFFT will keep the array norm.
if "ortho", each transform will keep the L2 norm, but that will
involve an extra read & write operation.
:param axes: a list or tuple of axes along which the transform is made.
if None, the transform is done along the ndim fastest axes, or all
axes if ndim is None. Not allowed for R2C transforms
:param cuda_stream: the pycuda.driver.Stream or cupy.cuda.Stream to use
for the transform. If None, the default one will be used
:param cl_queue: the pyopencl.CommandQueue to be used. If None,
the source array default queue will be used
:param return_scale: if True, return the scale factor by which the result
must be multiplied to keep its L2 norm after the transform
:return: the destination array if return_scale is False, or (dest, scale)
"""
backend, inplace, dest, cl_queue = _prepare_transform(src, dest, cl_queue, False)
app = _get_fft_app(backend, src.shape, src.dtype, inplace, ndim, axes, norm, cuda_stream, cl_queue)
app.fft(src, dest)
if return_scale:
s = app.get_fft_scale()
return dest, s
return dest
def ifftn(src, dest=None, ndim=None, norm=1, axes=None, cuda_stream=None, cl_queue=None,
return_scale=False):
"""
Perform an inverse FFT on a GPU array, automatically creating the VkFFTApp
and caching it for future re-use.
:param src: the source pycuda.gpuarray.GPUArray or cupy.ndarray
:param dest: the destination GPU array. If None, a new GPU array will
be created and returned (using the source array allocator
(pycuda, pyopencl) if available).
If dest is the same array as src, an inplace transform is done.
:param ndim: the number of dimensions (<=3) to use for the FFT. By default,
uses the array dimensions. Can be smaller, e.g. ndim=2 for a 3D
array to perform a batched 3D FFT on all the layers. The FFT
is always performed along the last axes if the array's number
of dimensions is larger than ndim, i.e. on the x-axis for ndim=1,
on the x and y axes for ndim=2.
:param norm: if 0 (un-normalised), every transform multiplies the L2 norm
of the array by the transform size.
if 1 (the default) or "backward", the inverse transform divides the
L2 norm by the array size, so FFT+iFFT will keep the array norm.
if "ortho", each transform will keep the L2 norm, but that will
involve an extra read & write operation.
:param axes: a list or tuple of axes along which the transform is made.
if None, the transform is done along the ndim fastest axes, or all
axes if ndim is None. Not allowed for R2C transforms
:param cuda_stream: the pycuda.driver.Stream or cupy.cuda.Stream to use
for the transform. If None, the default one will be used
:param cl_queue: the pyopencl.CommandQueue to be used. If None,
the source array default queue will be used
:param return_scale: if True, return the scale factor by which the result
must be multiplied to keep its L2 norm after the transform
:return: the destination array if return_scale is False, or (dest, scale)
"""
backend, inplace, dest, cl_queue = _prepare_transform(src, dest, cl_queue, False)
app = _get_fft_app(backend, src.shape, src.dtype, inplace, ndim, axes, norm, cuda_stream, cl_queue)
app.ifft(src, dest)
if return_scale:
s = app.get_fft_scale()
return dest, s
return dest
def rfftn(src, dest=None, ndim=None, norm=1, cuda_stream=None, cl_queue=None,
return_scale=False):
"""
Perform a real->complex transform on a GPU array, automatically creating
the VkFFTApp and caching it for future re-use.
For an out-of-place transform, the length of the destination last axis will
be src.shape[-1]//2+1.
For an in-place transform, if the src array has a shape (..., nx+2), the
last two values along the last (X) axis are ignored, and the destination
array will have a shape of (..., nx//2+1).
:param src: the source pycuda.gpuarray.GPUArray or cupy.ndarray
:param dest: the destination GPU array. If None, a new GPU array will
be created and returned (using the source array allocator
(pycuda, pyopencl) if available).
If dest is the same array as src, an inplace transform is done.
:param ndim: the number of dimensions (<=3) to use for the FFT. By default,
uses the array dimensions. Can be smaller, e.g. ndim=2 for a 3D
array to perform a batched 3D FFT on all the layers. The FFT
is always performed along the last axes if the array's number
of dimensions is larger than ndim, i.e. on the x-axis for ndim=1,
on the x and y axes for ndim=2.
:param norm: if 0 (un-normalised), every transform multiplies the L2 norm
of the array by the transform size.
if 1 (the default) or "backward", the inverse transform divides the
L2 norm by the array size, so FFT+iFFT will keep the array norm.
if "ortho", each transform will keep the L2 norm, but that will
involve an extra read & write operation.
:param cuda_stream: the pycuda.driver.Stream or cupy.cuda.Stream to use
for the transform. If None, the default one will be used
:param cl_queue: the pyopencl.CommandQueue to be used. If None,
the source array default queue will be used
:param return_scale: if True, return the scale factor by which the result
must be multiplied to keep its L2 norm after the transform
:return: the destination array if return_scale is False, or (dest, scale).
For an in-place transform, the returned value is a view of the array
with the appropriate type.
"""
backend, inplace, dest, cl_queue, dtype = _prepare_transform(src, dest, cl_queue, True)
app = _get_rfft_app(backend, src.shape, src.dtype, inplace, ndim, norm, cuda_stream, cl_queue)
app.fft(src, dest)
if return_scale:
s = app.get_fft_scale()
return dest.view(dtype=dtype), s
return dest.view(dtype=dtype)
def irfftn(src, dest=None, ndim=None, norm=1, cuda_stream=None, cl_queue=None,
return_scale=False):
"""
Perform a complex->real transform on a GPU array, automatically creating
the VkFFTApp and caching it for future re-use.
For an out-of-place transform, the length of the destination last axis will
be (src.shape[-1]-1)*2.
For an in-place transform, if the src array has a shape (..., nx), the
destination array will have a shape of (..., nx*2) but the last
two values along the last axis are used as a buffer.
:param src: the source pycuda.gpuarray.GPUArray or cupy.ndarray
:param dest: the destination GPU array. If None, a new GPU array will
be created and returned (using the source array allocator
(pycuda, pyopencl) if available).
If dest is the same array as src, an inplace transform is done.
:param ndim: the number of dimensions (<=3) to use for the FFT. By default,
uses the array dimensions. Can be smaller, e.g. ndim=2 for a 3D
array to perform a batched 3D FFT on all the layers. The FFT
is always performed along the last axes if the array's number
of dimensions is larger than ndim, i.e. on the x-axis for ndim=1,
on the x and y axes for ndim=2.
:param norm: if 0 (un-normalised), every transform multiplies the L2 norm
of the array by the transform size.
if 1 (the default) or "backward", the inverse transform divides the
L2 norm by the array size, so FFT+iFFT will keep the array norm.
if "ortho", each transform will keep the L2 norm, but that will
involve an extra read & write operation.
:param cuda_stream: the pycuda.driver.Stream or cupy.cuda.Stream to use
for the transform. If None, the default one will be used
:param cl_queue: the pyopencl.CommandQueue to be used. If None,
the source array default queue will be used
:param return_scale: if True, return the scale factor by which the result
must be multiplied to keep its L2 norm after the transform
:return: the destination array if return_scale is False, or (dest, scale)
For an in-place transform, the returned value is a view of the array
with the appropriate type.
"""
backend, inplace, dest, cl_queue, dtype = _prepare_transform(src, dest, cl_queue, True)
app = _get_rfft_app(backend, dest.shape, dest.dtype, inplace, ndim, norm, cuda_stream, cl_queue)
app.ifft(src, dest)
if return_scale:
s = app.get_fft_scale()
return dest.view(dtype=dtype), s
return dest.view(dtype=dtype)
def dctn(src, dest=None, ndim=None, norm=1, dct_type=2, cuda_stream=None, cl_queue=None):
"""
Perform a real->real Discrete Cosine Transform on a GPU array, automatically
creating the VkFFTApp and caching it for future re-use.
:param src: the source pycuda.gpuarray.GPUArray or cupy.ndarray
:param dest: the destination GPU array. If None, a new GPU array will
be created and returned (using the source array allocator
(pycuda, pyopencl) if available).
If dest is the same array as src, an inplace transform is done.
:param ndim: the number of dimensions (<=3) to use for the FFT. By default,
uses the array dimensions. Can be smaller, e.g. ndim=2 for a 3D
array to perform a batched 3D FFT on all the layers. The FFT
is always performed along the last axes if the array's number
of dimensions is larger than ndim, i.e. on the x-axis for ndim=1,
on the x and y axes for ndim=2.
:param norm: normalisation mode, either 0 (un-normalised) or
1 (the default, also available as "backward") which will normalise
the inverse transform, so DCT+iDCT will keep the array norm.
:param dct_type: the type of dct desired: 1, 2 (default), 3 or 4
:param cuda_stream: the pycuda.driver.Stream or cupy.cuda.Stream to use
for the transform. If None, the default one will be used
:param cl_queue: the pyopencl.CommandQueue to be used. If None,
the source array default queue will be used
:return: the destination array.
"""
backend, inplace, dest, cl_queue = _prepare_transform(src, dest, cl_queue, False)
app = _get_dct_app(backend, src.shape, src.dtype, inplace, ndim, norm,
dct_type, cuda_stream, cl_queue)
app.fft(src, dest)
return dest
def idctn(src, dest=None, ndim=None, norm=1, dct_type=2, cuda_stream=None, cl_queue=None):
"""
Perform a real->real inverse Discrete Cosine Transform on a GPU array,
automatically creating the VkFFTApp and caching it for future re-use.
:param src: the source pycuda.gpuarray.GPUArray or cupy.ndarray
:param dest: the destination GPU array. If None, a new GPU array will
be created and returned (using the source array allocator
(pycuda, pyopencl) if available).
If dest is the same array as src, an inplace transform is done.
:param ndim: the number of dimensions (<=3) to use for the FFT. By default,
uses the array dimensions. Can be smaller, e.g. ndim=2 for a 3D
array to perform a batched 3D FFT on all the layers. The FFT
is always performed along the last axes if the array's number
of dimensions is larger than ndim, i.e. on the x-axis for ndim=1,
on the x and y axes for ndim=2.
:param norm: normalisation mode, either 0 (un-normalised) or
1 (the default, also available as "backward") which will normalise
the inverse transform, so DCT+iDCT will keep the array norm.
:param dct_type: the type of dct desired: 2 (default), 3 or 4
:param cuda_stream: the pycuda.driver.Stream or cupy.cuda.Stream to use
for the transform. If None, the default one will be used
:param cl_queue: the pyopencl.CommandQueue to be used. If None,
the source array default queue will be used
:return: the destination array.
"""
backend, inplace, dest, cl_queue = _prepare_transform(src, dest, cl_queue, False)
app = _get_dct_app(backend, src.shape, src.dtype, inplace, ndim, norm,
dct_type, cuda_stream, cl_queue)
app.ifft(src, dest)
return dest
def clear_vkfftapp_cache():
""" Remove all cached VkFFTApp instances"""
_get_fft_app.cache_clear()
_get_rfft_app.cache_clear()
_get_dct_app.cache_clear()
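if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original source): assuming a
    # working pycuda installation, perform an out-of-place FFT round trip.
    # With the default norm=1, FFT followed by iFFT preserves the array.
    if has_pycuda:
        import pycuda.autoinit  # noqa: F401 -- creates a default CUDA context
        a = cua.to_gpu(np.random.rand(15, 15).astype(np.complex64))
        b = fftn(a)   # new GPU array holding the forward transform
        c = ifftn(b)  # inverse transform back to the original
        print(np.allclose(a.get(), c.get(), atol=1e-5))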
| 47.34878
| 103
| 0.660897
| 2,967
| 19,413
| 4.248399
| 0.085608
| 0.023879
| 0.013963
| 0.010472
| 0.835462
| 0.82618
| 0.801269
| 0.792067
| 0.77858
| 0.766283
| 0
| 0.010573
| 0.264359
| 19,413
| 409
| 104
| 47.464548
| 0.872068
| 0.57446
| 0
| 0.463855
| 0
| 0
| 0.031246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066265
| false
| 0
| 0.066265
| 0
| 0.271084
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
335f058bccdc187671e284b665d0a36b6d5cf372
| 8,060
|
py
|
Python
|
models/normalization.py
|
pkulwj1994/AdversarialConsistentScoreMatching
|
f439f242f004ce06382ed72f2aa7daf9c262abfa
|
[
"MIT"
] | 119
|
2020-09-09T13:59:28.000Z
|
2022-03-17T17:04:10.000Z
|
models/normalization.py
|
pkulwj1994/AdversarialConsistentScoreMatching
|
f439f242f004ce06382ed72f2aa7daf9c262abfa
|
[
"MIT"
] | 2
|
2020-11-13T03:26:22.000Z
|
2021-03-19T23:04:33.000Z
|
models/normalization.py
|
pkulwj1994/AdversarialConsistentScoreMatching
|
f439f242f004ce06382ed72f2aa7daf9c262abfa
|
[
"MIT"
] | 19
|
2020-09-14T05:56:51.000Z
|
2021-12-28T15:53:34.000Z
|
import torch
import torch.nn as nn
def get_normalization(m_config, conditional=True):
norm = m_config.normalization
if conditional:
if norm == 'NoneNorm':
return ConditionalNoneNorm2d
elif norm == 'InstanceNorm++':
return ConditionalInstanceNorm2dPlus
elif norm == 'InstanceNorm':
return ConditionalInstanceNorm2d
elif norm == 'BatchNorm':
return ConditionalBatchNorm2d
elif norm == 'VarianceNorm':
return ConditionalVarianceNorm2d
else:
raise NotImplementedError("{} does not exist!".format(norm))
else:
if norm == 'BatchNorm':
return nn.BatchNorm2d
elif norm == 'InstanceNorm':
return nn.InstanceNorm2d
elif norm == 'InstanceNorm++':
return InstanceNorm2dPlus
elif norm == 'VarianceNorm':
return VarianceNorm2d
elif norm == 'NoneNorm':
return NoneNorm2d
elif norm is None:
return None
else:
raise NotImplementedError("{} does not exist!".format(norm))
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.bn = nn.BatchNorm2d(num_features, affine=False)
if self.bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].uniform_() # Initialise scale with U(0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
out = self.bn(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=1)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * out
return out
class ConditionalInstanceNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
if bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].uniform_() # Initialise scale with U(0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
h = self.instance_norm(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=-1)
out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalVarianceNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.normal_(1, 0.02)
def forward(self, x, y):
f_vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(f_vars + 1e-5)
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
class VarianceNorm2d(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, x):
f_vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(f_vars + 1e-5)
out = self.alpha.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalNoneNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
if bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].uniform_() # Initialise scale with U(0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=-1)
out = gamma.view(-1, self.num_features, 1, 1) * x + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * x
return out
# noinspection PyUnusedLocal
class NoneNorm2d(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
@staticmethod
def forward(x):
return x
class InstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / (torch.sqrt(v + 1e-5))
h = self.instance_norm(x)
if self.bias:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1)
else:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalInstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
if bias:
self.embed = nn.Embedding(num_classes, num_features * 3)
self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02)
self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, 2 * num_features)
self.embed.weight.data.normal_(1, 0.02)
def forward(self, x, y):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / (torch.sqrt(v + 1e-5))
h = self.instance_norm(x)
if self.bias:
gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
h = h + means[..., None, None] * alpha[..., None, None]
out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
else:
gamma, alpha = self.embed(y).chunk(2, dim=-1)
h = h + means[..., None, None] * alpha[..., None, None]
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
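# Editor's sketch: a minimal, hedged usage example of the conditional norm
# layers above (shapes and hyperparameters are assumptions chosen for
# illustration; this block is not part of the original module and relies on
# the module's own torch import).
if __name__ == '__main__':
    # Per-class scale/shift lookup: 64 feature maps, 10 classes.
    norm = ConditionalInstanceNorm2dPlus(num_features=64, num_classes=10)
    x = torch.randn(8, 64, 32, 32)    # (batch, channels, height, width)
    y = torch.randint(0, 10, (8,))    # one integer class label per sample
    out = norm(x, y)                  # same shape as the input
    print(out.shape)                  # torch.Size([8, 64, 32, 32])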
| 38.199052
| 112
| 0.591687
| 1,045
| 8,060
| 4.394258
| 0.091866
| 0.15331
| 0.10453
| 0.044425
| 0.790941
| 0.783754
| 0.776786
| 0.766333
| 0.738023
| 0.712544
| 0
| 0.026506
| 0.279156
| 8,060
| 210
| 113
| 38.380952
| 0.763855
| 0.029032
| 0
| 0.705556
| 0
| 0
| 0.01868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094444
| false
| 0
| 0.011111
| 0.005556
| 0.255556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6856c6f0caf7be2b8f762fcf7db6f96ea6b3e2c0
| 22
|
py
|
Python
|
HangmanGame/views/__init__.py
|
github-675455/JogoDaForca
|
66b3aaae97c64c116f12d3a98c53bf0fc383bd63
|
[
"MIT"
] | null | null | null |
HangmanGame/views/__init__.py
|
github-675455/JogoDaForca
|
66b3aaae97c64c116f12d3a98c53bf0fc383bd63
|
[
"MIT"
] | 2
|
2018-08-29T03:34:02.000Z
|
2018-08-29T18:25:44.000Z
|
HangmanGame/views/__init__.py
|
github-675455/JogoDaForca
|
66b3aaae97c64c116f12d3a98c53bf0fc383bd63
|
[
"MIT"
] | null | null | null |
from .Jogo import Jogo
| 22
| 22
| 0.818182
| 4
| 22
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
685ee02fbc19ee5a6b9d35f61855a706b51d9f2d
| 26
|
py
|
Python
|
hello_world.py
|
LarsPalmas/profile-rest-api2
|
5858a9342f762df01e5610fe090f8d27b8742d27
|
[
"MIT"
] | null | null | null |
hello_world.py
|
LarsPalmas/profile-rest-api2
|
5858a9342f762df01e5610fe090f8d27b8742d27
|
[
"MIT"
] | 8
|
2019-12-05T01:07:32.000Z
|
2022-02-10T11:52:03.000Z
|
hello_world.py
|
LarsPalmas/profile-rest-api2
|
5858a9342f762df01e5610fe090f8d27b8742d27
|
[
"MIT"
] | null | null | null |
print("Hello World Eini!")
| 26
| 26
| 0.730769
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 26
| 1
| 26
| 26
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
d79d48e6c1049bad0901797b24f08f246811fd3d
| 22
|
py
|
Python
|
src/psweep/__init__.py
|
elcorto/psweep
|
b1d372ba19f1d98744e04a1576211d51123272b1
|
[
"BSD-3-Clause"
] | 6
|
2020-03-24T07:24:37.000Z
|
2021-07-29T07:18:59.000Z
|
src/psweep/__init__.py
|
elcorto/psweep
|
b1d372ba19f1d98744e04a1576211d51123272b1
|
[
"BSD-3-Clause"
] | 2
|
2019-08-20T22:14:18.000Z
|
2022-03-11T09:16:59.000Z
|
src/psweep/__init__.py
|
elcorto/psweep
|
b1d372ba19f1d98744e04a1576211d51123272b1
|
[
"BSD-3-Clause"
] | 1
|
2020-02-22T12:13:13.000Z
|
2020-02-22T12:13:13.000Z
|
from .psweep import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d7c4761da5e86d99296e44d4e2925878310fd719
| 98
|
py
|
Python
|
fornax/stages/prepare_environment/__init__.py
|
lwencel-priv/fornax
|
0f66a6284975bc5a2cfc3d38bc01ef6ad492e40e
|
[
"MIT"
] | null | null | null |
fornax/stages/prepare_environment/__init__.py
|
lwencel-priv/fornax
|
0f66a6284975bc5a2cfc3d38bc01ef6ad492e40e
|
[
"MIT"
] | null | null | null |
fornax/stages/prepare_environment/__init__.py
|
lwencel-priv/fornax
|
0f66a6284975bc5a2cfc3d38bc01ef6ad492e40e
|
[
"MIT"
] | null | null | null |
"""Prepare environment stage package."""
from .prepare_environment import PrepareEnvironmentStage
| 32.666667
| 56
| 0.836735
| 9
| 98
| 9
| 0.777778
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 98
| 2
| 57
| 49
| 0.9
| 0.346939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d7caadf0e5af43670a31ca342530fd496e1683fe
| 33
|
py
|
Python
|
painterAssistant/__init__.py
|
bartoszpogoda/academic-py-painter-assistant
|
2a2dff55e1d7b631f28d6492c5553a8e7ac5abc2
|
[
"MIT"
] | null | null | null |
painterAssistant/__init__.py
|
bartoszpogoda/academic-py-painter-assistant
|
2a2dff55e1d7b631f28d6492c5553a8e7ac5abc2
|
[
"MIT"
] | null | null | null |
painterAssistant/__init__.py
|
bartoszpogoda/academic-py-painter-assistant
|
2a2dff55e1d7b631f28d6492c5553a8e7ac5abc2
|
[
"MIT"
] | null | null | null |
from . import paintCanCalculator
| 16.5
| 32
| 0.848485
| 3
| 33
| 9.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d7f6d6b2a64adc2158779c58c330914da6b59f15
| 49
|
py
|
Python
|
microservices/svc/multiplier/handler/__init__.py
|
sato-mh/distributed-calculator
|
8d044084a0f70effe5264f3a726962e3ac8da7f5
|
[
"MIT"
] | null | null | null |
microservices/svc/multiplier/handler/__init__.py
|
sato-mh/distributed-calculator
|
8d044084a0f70effe5264f3a726962e3ac8da7f5
|
[
"MIT"
] | null | null | null |
microservices/svc/multiplier/handler/__init__.py
|
sato-mh/distributed-calculator
|
8d044084a0f70effe5264f3a726962e3ac8da7f5
|
[
"MIT"
] | null | null | null |
from .multiplier import Multiplier # noqa: F401
| 24.5
| 48
| 0.77551
| 6
| 49
| 6.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0.163265
| 49
| 1
| 49
| 49
| 0.853659
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cc19738fd831e6d3270cbd9b45609bcd503d9767
| 27
|
py
|
Python
|
scripts/post_process/__init__.py
|
AndAgio/Shallow2Deep
|
e42e9b3b11fdd2ec035144890a88e93a5154276f
|
[
"Apache-2.0"
] | null | null | null |
scripts/post_process/__init__.py
|
AndAgio/Shallow2Deep
|
e42e9b3b11fdd2ec035144890a88e93a5154276f
|
[
"Apache-2.0"
] | 2
|
2021-02-17T12:07:45.000Z
|
2021-02-17T12:16:21.000Z
|
scripts/post_process/__init__.py
|
AndAgio/Shallow2Deep
|
e42e9b3b11fdd2ec035144890a88e93a5154276f
|
[
"Apache-2.0"
] | null | null | null |
from .process_log import *
| 13.5
| 26
| 0.777778
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cc2ea080861f80c4ae8ef63134f969e8e9f1745c
| 53,566
|
py
|
Python
|
__init__.py
|
SAM-tak/gen_rigidbodies
|
4cd8f81270dfc03e7705ace6912890ac00a0745a
|
[
"MIT"
] | 7
|
2020-01-14T22:22:04.000Z
|
2022-01-30T06:00:39.000Z
|
__init__.py
|
SAM-tak/gen_rigidbodies
|
4cd8f81270dfc03e7705ace6912890ac00a0745a
|
[
"MIT"
] | null | null | null |
__init__.py
|
SAM-tak/gen_rigidbodies
|
4cd8f81270dfc03e7705ace6912890ac00a0745a
|
[
"MIT"
] | null | null | null |
import bpy
from bpy.props import *
import mathutils
import math
bl_info = {
"name": "Generate rigid bodies from bone",
"author": "SAM-tak, 12funkeys",
"version": (1, 0, 0),
"blender": (2, 80, 0),
"location": "pose > Gen Rigid Bodies",
"description": "Set rigid bodies and constraints easily",
"warning": "",
"support": "COMMUNITY",
"wiki_url": "https://github.com/SAM-tak/gen_rigidbodies/wiki",
"tracker_url": "https://github.com/SAM-tak/gen_rigidbodies",
"category": "Rigging"
}
translation_dict = {
"en_US" : {
("*", "Make Rigid Body Tools") : "Make Rigid Body Tools",
("*", "Gen Rigid Bodies") : "Gen Rigid Bodies",
("*", "Make Rigid Bodies") : "Make Rigid Bodies",
("*", "Add Passives") : "Add Passives",
("*", "Make passive rigid bodies aligned to selected bones") : "Make passive rigid bodies aligned to selected bones",
("*", "Add Actives") : "Add Actives",
("*", "Make active rigid bodies aligned to selected bones") : "Make active rigid bodies aligned to selected bones",
("*", "Add Joints") : "Add Joints",
("*", "Add Actives & Joints") : "Add Actives & Joints"
},
"ja_JP" : {
("*", "Make Rigid Bodies Tools") : "選択ボーン",
("*", "Gen Rigid Bodies") : "剛体ツール",
("*", "Make Rigid Bodies") : "選択ボーン",
("*", "Add Passives") : "基礎剛体の作成‐ボーン追従",
("*", "Make passive rigid bodies aligned to selected bones") : "ボーンに追従する静的剛体を作成します",
("*", "Add Actives") : "基礎剛体の作成‐物理演算",
("*", "Make active rigid bodies aligned to selected bones") : "ボーンに追従する動的剛体を作成します",
("*", "Add Joints") : "基礎Jointの作成",
("*", "Add Actives & Joints") : "基礎剛体/連結Jointの作成"
}
}
shapes = [
('MESH', 'Mesh', 'Mesh'),
('CONVEX_HULL', 'Convex Hull', 'Convex Hull'),
('CONE', 'Cone', 'Cone'),
('CYLINDER', 'Cylinder', 'Cylinder'),
('CAPSULE', 'Capsule', 'Capsule'),
('SPHERE', 'Sphere', 'Sphere'),
('BOX', 'Box', 'Box')
]
types = [
('MOTOR', 'Motor', 'Motor'),
('GENERIC_SPRING', 'Generic Spring', 'Generic Spring'),
('GENERIC', 'Generic', 'Generic')
]
### Menus
class PoseMenu(bpy.types.Menu):
bl_idname = "GENRIGIDBODIES_MT_PoseSubMenuRoot"
bl_label = "Gen Rigid Bodies"
bl_description = "Make rigid bodies & constraint"
def draw(self, context):
self.layout.operator(AddPassiveOperator.bl_idname, icon='BONE_DATA')
self.layout.operator(AddActiveOperator.bl_idname, icon='PHYSICS')
self.layout.operator(AddJointOperator.bl_idname, icon='CONSTRAINT')
self.layout.operator(AddActiveNJointOperator.bl_idname, icon='MOD_PHYSICS')
@staticmethod
def menu_fn(menu, context):
menu.layout.separator()
menu.layout.menu(PoseMenu.bl_idname, icon='MESH_ICOSPHERE')
@classmethod
def register(cls):
bpy.app.translations.register(__name__, translation_dict)
bpy.types.VIEW3D_MT_pose.append(cls.menu_fn)
@classmethod
def unregister(cls):
bpy.types.VIEW3D_MT_pose.remove(cls.menu_fn)
bpy.app.translations.unregister(__name__)
class ObjectMenu(bpy.types.Menu):
bl_idname = "GENRIGIDBODIES_MT_ObjectSubMenuRoot"
bl_label = "Gen Rigid Bodies"
bl_description = "Gen Rigid Bodies Utility"
def draw(self, context):
self.layout.operator(ReparentOrphanTrackObjectOperator.bl_idname)
self.layout.operator(ForceCorrespondNameRBAndTrackObjectOperator.bl_idname)
self.layout.operator(ConnectOperator.bl_idname, icon='MESH_ICOSPHERE')
@staticmethod
def menu_fn(menu, context):
menu.layout.separator()
menu.layout.menu(ObjectMenu.bl_idname)
@classmethod
def register(cls):
bpy.types.VIEW3D_MT_object.append(cls.menu_fn)
@classmethod
def unregister(cls):
bpy.types.VIEW3D_MT_object.remove(cls.menu_fn)
### user prop
class UProp:
rb_shape = EnumProperty(
name='Shape',
description='Choose Rigid Body Shape',
items=shapes,
#items=bpy.types.RigidBodyObject.collision_shape,
#update=update_shape,
default='CAPSULE'
)
rb_dim = FloatVectorProperty(
name = "Dimensions",
description = "rigid body Dimensions XYZ",
default = (1, 1, 1),
subtype = 'XYZ',
unit = 'NONE',
min = 0,
max = 5
)
rb_radius = FloatProperty(
name = "Radius",
description = "rigid body Collision Radius",
default = 0.3,
subtype = 'NONE',
min = 0,
max = 5
)
rb_length = FloatProperty(
name = "Height",
description = "rigid body Collision length",
default = 1.0,
subtype = 'NONE',
min = 0,
max = 5
)
rb_inset_capsule = BoolProperty(
name='Inset Capsule',
        description='If the shape is a capsule, reduce the length by the radius so it is inscribed',
default=False
)
rb_mass = FloatProperty(
name = "Mass",
description = "rigid body mass",
default = 1.0,
subtype = 'NONE',
min = 0.001
)
rb_friction = FloatProperty(
name = "Friction",
description = "rigid body friction",
default = 0.5,
subtype = 'NONE',
min = 0,
max = 1
)
rb_bounciness = FloatProperty(
name = "Bounciness",
description = "rigid body bounciness",
default = 0.5,
subtype = 'NONE',
min = 0,
max = 1
)
rb_translation = FloatProperty(
name = "Translation",
description = "rigid body translation",
default = 0.5,
subtype = 'NONE',
min = 0,
max = 1
)
rb_rotation = FloatProperty(
name = "Rotation",
description = "rigid body rotation",
default = 0.5,
subtype = 'NONE',
min = 0,
max = 1
)
rb_rootbody_passive = BoolProperty(
name='Passive',
description='Rigid Body Type Passive',
default=True
)
rb_add_pole_rootbody = BoolProperty(
name='Add Pole Object',
description='Add Pole Object',
default=False
)
rb_pole_rootbody_dim = FloatVectorProperty(
name = "Pole Object Dimension",
description = "Pole Object Dimension XYZ",
default = (0.33, 0.33, 0.33),
subtype = 'XYZ',
unit = 'NONE',
min = 0,
max = 5
)
rb_rootbody_animated = BoolProperty(
name='animated',
description='Root Rigid Body sets animated',
default=True
)
jo_type = EnumProperty(
name='Type',
        description='Choose Constraint Type',
items=types,
default='GENERIC_SPRING'
)
jo_size = FloatProperty(
name = "joint Size",
description = "joint Size",
default = 0.33,
subtype = 'NONE',
min = 0.001,
max = 1
)
jo_limit_lin_x = BoolProperty(
name='X Axis',
description='limit x',
default=True,
options={'ANIMATABLE'}
)
jo_limit_lin_y = BoolProperty(
name='Y Axis',
description='limit y',
default=True
)
jo_limit_lin_z = BoolProperty(
name='Z Axis',
description='limit z',
default=True
)
jo_limit_lin_x_lower = FloatProperty(
name = "Lower",
description = "joint limit_lin_x_lower",
default = 0,
subtype = 'NONE'
)
jo_limit_lin_y_lower = FloatProperty(
name = "Lower",
description = "joint limit_lin_y_lower",
default = 0,
subtype = 'NONE'
)
jo_limit_lin_z_lower = FloatProperty(
name = "Lower",
description = "joint limit_lin_z_lower",
default = 0,
subtype = 'NONE'
)
jo_limit_lin_x_upper = FloatProperty(
name = "Upper",
description = "joint limit_lin_x_upper",
default = 0,
subtype = 'NONE'
)
jo_limit_lin_y_upper = FloatProperty(
name = "Upper",
description = "joint limit_lin_y_upper",
default = 0,
subtype = 'NONE'
)
jo_limit_lin_z_upper = FloatProperty(
name = "Upper",
description = "joint limit_lin_z_upper",
default = 0,
subtype = 'NONE'
)
jo_limit_ang_x = BoolProperty(
name='X Angle',
description='Angle limit x',
default=True,
options={'ANIMATABLE'}
)
jo_limit_ang_y = BoolProperty(
name='Y Angle',
description='Angle limit y',
default=True
)
jo_limit_ang_z = BoolProperty(
name='Z Angle',
description='Angle limit z',
default=True
)
jo_limit_ang_x_lower = FloatProperty(
name = "Lower",
description = "joint limit_ang_x_lower",
default = -0.785398,
subtype = 'ANGLE'
)
jo_limit_ang_y_lower = FloatProperty(
name = "Lower",
description = "joint limit_ang_y_lower",
default = -0.785398,
subtype = 'ANGLE'
)
jo_limit_ang_z_lower = FloatProperty(
name = "Lower",
description = "joint limit_ang_z_lower",
default = -0.785398,
subtype = 'ANGLE'
)
jo_limit_ang_x_upper = FloatProperty(
name = "Upper",
description = "joint limit_ang_x_upper",
default = 0.785398,
subtype = 'ANGLE'
)
jo_limit_ang_y_upper = FloatProperty(
name = "Upper",
description = "joint limit_ang_y_upper",
default = 0.785398,
subtype = 'ANGLE'
)
jo_limit_ang_z_upper = FloatProperty(
name = "Upper",
description = "joint limit_ang_z_upper",
default = 0.785398,
subtype = 'ANGLE'
)
jo_use_spring_x = BoolProperty(
name='X',
description='use spring x',
default=False
)
jo_use_spring_y = BoolProperty(
name='Y',
description='use spring y',
default=False
)
jo_use_spring_z = BoolProperty(
name='Z',
description='use spring z',
default=False
)
jo_spring_stiffness_x = FloatProperty(
name = "Stiffness",
description = "Stiffness on the X Axis",
default = 10.000,
subtype = 'NONE',
min = 0
)
jo_spring_stiffness_y = FloatProperty(
name = "Stiffness",
description = "Stiffness on the Y Axis",
default = 10.000,
subtype = 'NONE',
min = 0
)
jo_spring_stiffness_z = FloatProperty(
name = "Stiffness",
description = "Stiffness on the Z Axis",
default = 10.000,
subtype = 'NONE',
min = 0
)
jo_spring_damping_x = FloatProperty(
name = "Damping X",
description = "Damping on the X Axis",
default = 0.5,
subtype = 'NONE',
min = 0,
max = 1
)
jo_spring_damping_y = FloatProperty(
name = "Damping Y",
description = "Damping on the Y Axis",
default = 0.5,
subtype = 'NONE',
min = 0,
max = 1
)
jo_spring_damping_z = FloatProperty(
name = "Damping Z",
description = "Damping on the Z Axis",
default = 0.5,
subtype = 'NONE',
min = 0,
max = 1
)
jo_align_bone = BoolProperty(
name='Align Joint To Bone',
description='Set same rotation of bone to joint object',
default=True
)
### Create Rigid Bodies On Bones
class AddPassiveOperator(bpy.types.Operator):
bl_idname = "genrigidbodies.addpassivejoint"
bl_label = "Add Passives"
bl_description = "Make passive rigid bodies aligned to selected bones"
bl_options = {'REGISTER', 'UNDO'}
###instance UProp.rigidbody
p_rb_shape : UProp.rb_shape
p_rb_dim : UProp.rb_dim
p_rb_radius : UProp.rb_radius
p_rb_length : UProp.rb_length
p_rb_inset_capsule : UProp.rb_inset_capsule
p_rb_mass : UProp.rb_mass
p_rb_friction : UProp.rb_friction
p_rb_bounciness : UProp.rb_bounciness
p_rb_translation : UProp.rb_translation
p_rb_rotation : UProp.rb_rotation
p_rb_rootbody_passive : UProp.rb_rootbody_passive
p_rb_rootbody_animated : UProp.rb_rootbody_animated
def draw(self, context):
box = self.layout.box()
box.prop(self, 'p_rb_shape')
if self.p_rb_shape in ('CONE', 'CYLINDER', 'CAPSULE', 'SPHERE'):
box.prop(self, 'p_rb_radius')
box.prop(self, 'p_rb_length')
if self.p_rb_shape == 'CAPSULE':
box.prop(self, 'p_rb_inset_capsule')
else:
box.prop(self, 'p_rb_dim')
box.prop(self, 'p_rb_mass')
box.prop(self, 'p_rb_friction')
box.prop(self, 'p_rb_bounciness')
box.label(text="Damping:")
box.prop(self, 'p_rb_translation')
box.prop(self, 'p_rb_rotation')
box.prop(self, 'p_rb_rootbody_passive')
box.prop(self, 'p_rb_rootbody_animated')
def execute(self, context):
###selected Armature
ob = context.active_object
#self.report({'INFO'}, ob.data)
if len(context.selected_pose_bones) == 0:
return {'FINISHED'}
params = self
for selected_bone in context.selected_pose_bones:
#self.report({'INFO'}, str(selected_bone.vector[0]))
###Create Rigidbody Cube
bpy.ops.mesh.primitive_cube_add(size=1, location=ob.matrix_world @ selected_bone.center)
rc = context.active_object
if rc is None:
                self.report({'INFO'}, 'Rigid body creation failed. Verify a Rigidbody World exists and set the current collection to the Rigidbody World collection')
return {'CANCELLED'}
rc.name = "rb." + ob.name + '.' + selected_bone.name
rc.rotation_mode = 'QUATERNION'
rc.show_in_front = True
rc.display.show_shadows = False
rc.display_type = 'BOUNDS'
rc.hide_render = True
rc.cycles_visibility.transmission = False
rc.cycles_visibility.camera = False
rc.cycles_visibility.diffuse = False
rc.cycles_visibility.scatter = False
rc.cycles_visibility.shadow = False
rc.cycles_visibility.glossy = False
rc.show_bounds = True
rc.display_bounds_type = params.p_rb_shape
align_rb_ort_to_bone(rc, ob, selected_bone.name)
### Rigid Body Dimensions
            set_dimensions(context, params, selected_bone)
### Scale Apply
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
### Set Rigid Body
bpy.ops.rigidbody.object_add()
            if params.p_rb_rootbody_passive:
context.object.rigid_body.type = "PASSIVE"
else:
context.object.rigid_body.type = "ACTIVE"
context.object.rigid_body.collision_shape = params.p_rb_shape
context.object.rigid_body.kinematic = params.p_rb_rootbody_animated
context.object.rigid_body.mass = params.p_rb_mass
context.object.rigid_body.friction = params.p_rb_friction
context.object.rigid_body.restitution = params.p_rb_bounciness
context.object.rigid_body.linear_damping = params.p_rb_translation
context.object.rigid_body.angular_damping = params.p_rb_rotation
### Child OF
CoC = rc.constraints.new('CHILD_OF')
CoC.name = 'Child_Of_' + selected_bone.name
CoC.target = ob
CoC.subtarget = selected_bone.name
            # set the Child Of constraint's inverse matrix without bpy.ops (childof_set_inverse)
sub_target = bpy.data.objects[ob.name].pose.bones[selected_bone.name]
#self.report({'INFO'}, str(sub_target))
CoC.inverse_matrix = (ob.matrix_world @ sub_target.matrix).inverted()
rc.update_tag(refresh={'OBJECT'})
###clear object select
context.view_layer.objects.active = ob
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='DESELECT')
self.report({'INFO'}, "OK")
return {'FINISHED'}
#
class AddActiveOperator(bpy.types.Operator):
bl_idname = "genrigidbodies.addactive"
bl_label = "Add Actives"
bl_description = "Make active rigid bodies aligned to selected bones"
bl_options = {'REGISTER', 'UNDO'}
tr_size = 0.25
###instance UProp.rigidbody
p_rb_shape : UProp.rb_shape
p_rb_radius : UProp.rb_radius
p_rb_length : UProp.rb_length
p_rb_inset_capsule : UProp.rb_inset_capsule
p_rb_dim : UProp.rb_dim
p_rb_mass : UProp.rb_mass
p_rb_friction : UProp.rb_friction
p_rb_bounciness : UProp.rb_bounciness
p_rb_translation : UProp.rb_translation
p_rb_rotation : UProp.rb_rotation
p_rb_rootbody_animated : UProp.rb_rootbody_animated
def draw(self, context):
box = self.layout.box()
box.prop(self, 'p_rb_shape')
if self.p_rb_shape in ('CONE', 'CYLINDER', 'CAPSULE', 'SPHERE'):
box.prop(self, 'p_rb_radius')
box.prop(self, 'p_rb_length')
if self.p_rb_shape == 'CAPSULE':
box.prop(self, 'p_rb_inset_capsule')
else:
box.prop(self, 'p_rb_dim')
box.prop(self, 'p_rb_mass')
box.prop(self, 'p_rb_friction')
box.prop(self, 'p_rb_bounciness')
box.prop(self, 'p_rb_translation')
box.prop(self, 'p_rb_rotation')
#box.prop(self, 'p_rb_rootbody_passive')
box.prop(self, 'p_rb_rootbody_animated')
###
def execute(self, context):
###selected Armature
ob = context.active_object
#self.report({'INFO'}, ob.data)
spb = context.selected_pose_bones
params = self
bpy.ops.object.mode_set(mode='OBJECT')
for selected_bone in spb:
#self.report({'INFO'}, str(selected_bone.vector[0]))
###Create Rigidbody Cube
bpy.ops.mesh.primitive_cube_add(size=1, location=ob.matrix_world @ selected_bone.center)
rc = context.active_object
if rc is None:
                self.report({'INFO'}, 'Rigid body creation failed. Verify a Rigidbody World exists and set the current collection to the Rigidbody World collection')
return {'CANCELLED'}
rc.name = "rb." + ob.name + '.' + selected_bone.name
rc.rotation_mode = 'QUATERNION'
rc.show_in_front = True
rc.display.show_shadows = False
rc.display_type = 'BOUNDS'
rc.hide_render = True
rc.cycles_visibility.transmission = False
rc.cycles_visibility.camera = False
rc.cycles_visibility.diffuse = False
rc.cycles_visibility.scatter = False
rc.cycles_visibility.shadow = False
rc.cycles_visibility.glossy = False
rc.show_bounds = True
rc.display_bounds_type = params.p_rb_shape
align_rb_ort_to_bone(rc, ob, selected_bone.name)
### Rigid Body Dimensions
            set_dimensions(context, params, selected_bone)
### Scale Apply
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
### Set Rigid Body
bpy.ops.rigidbody.object_add()
context.object.rigid_body.type = "ACTIVE"
context.object.rigid_body.collision_shape = params.p_rb_shape
context.object.rigid_body.kinematic = params.p_rb_rootbody_animated
context.object.rigid_body.mass = params.p_rb_mass
context.object.rigid_body.friction = params.p_rb_friction
context.object.rigid_body.restitution = params.p_rb_bounciness
context.object.rigid_body.linear_damping = params.p_rb_translation
context.object.rigid_body.angular_damping = params.p_rb_rotation
## Make Track offset point
bpy.ops.object.empty_add(type='ARROWS')
tr = context.active_object
tr.name = "tr." + ob.name + "." + selected_bone.name
tr.empty_display_size = selected_bone.length * self.tr_size
tr.rotation_mode = 'QUATERNION'
### Align track object to bone
align_obj_to_bone(tr, ob, selected_bone.name)
tr.parent = rc
tr.matrix_parent_inverse = rc.matrix_world.inverted()
context.view_layer.objects.active = ob
### bone's use_connect turn to false
bpy.ops.object.mode_set(mode='EDIT')
for selected_bone in spb:
ob.data.edit_bones[selected_bone.name].use_connect = False
### Set Copy Transform Constraint To Bone
bpy.ops.object.mode_set(mode='POSE')
for selected_bone in spb:
tr = bpy.data.objects["tr." + ob.name + "." + selected_bone.name]
#self.report({'INFO'}, str(rc.name))
con = selected_bone.constraints.new('COPY_TRANSFORMS')
#self.report({'INFO'}, "info:" + str(CoC))
con.name = 'Copy Transforms Of ' + tr.name
con.target = tr
### clear object select
context.view_layer.objects.active = ob
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='DESELECT')
self.report({'INFO'}, "OK")
return {'FINISHED'}
#
class AddJointOperator(bpy.types.Operator):
bl_idname = "genrigidbodies.addjoint"
bl_label = "Add Joints"
bl_description = "Make rigid body constraints on selected bones"
bl_options = {'REGISTER', 'UNDO'}
###instance UProp.joint
joint_type : UProp.jo_type
joint_size : UProp.jo_size
joint_align_bone : UProp.jo_align_bone
joint_Axis_limit_x : UProp.jo_limit_lin_x
joint_Axis_limit_y : UProp.jo_limit_lin_y
joint_Axis_limit_z : UProp.jo_limit_lin_z
joint_Axis_limit_x_lower : UProp.jo_limit_lin_x_lower
joint_Axis_limit_y_lower : UProp.jo_limit_lin_y_lower
joint_Axis_limit_z_lower : UProp.jo_limit_lin_z_lower
joint_Axis_limit_x_upper : UProp.jo_limit_lin_x_upper
joint_Axis_limit_y_upper : UProp.jo_limit_lin_y_upper
joint_Axis_limit_z_upper : UProp.jo_limit_lin_z_upper
joint_Angle_limit_x : UProp.jo_limit_ang_x
joint_Angle_limit_y : UProp.jo_limit_ang_y
joint_Angle_limit_z : UProp.jo_limit_ang_z
joint_Angle_limit_x_lower : UProp.jo_limit_ang_x_lower
joint_Angle_limit_y_lower : UProp.jo_limit_ang_y_lower
joint_Angle_limit_z_lower : UProp.jo_limit_ang_z_lower
joint_Angle_limit_x_upper : UProp.jo_limit_ang_x_upper
joint_Angle_limit_y_upper : UProp.jo_limit_ang_y_upper
joint_Angle_limit_z_upper : UProp.jo_limit_ang_z_upper
joint_use_spring_x : UProp.jo_use_spring_x
joint_use_spring_y : UProp.jo_use_spring_y
joint_use_spring_z : UProp.jo_use_spring_z
joint_spring_stiffness_x : UProp.jo_spring_stiffness_x
joint_spring_stiffness_y : UProp.jo_spring_stiffness_y
joint_spring_stiffness_z : UProp.jo_spring_stiffness_z
joint_spring_damping_x : UProp.jo_spring_damping_x
joint_spring_damping_y : UProp.jo_spring_damping_y
joint_spring_damping_z : UProp.jo_spring_damping_z
def draw(self, context):
box = self.layout.box()
box.prop(self, 'joint_type')
box.prop(self, 'joint_size')
box.prop(self, 'joint_align_bone')
col = box.column(align=True)
col.label(text="Limits:")
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Axis_limit_x', toggle=True)
sub.prop(self, 'joint_Axis_limit_x_lower')
sub.prop(self, 'joint_Axis_limit_x_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Axis_limit_y', toggle=True)
sub.prop(self, 'joint_Axis_limit_y_lower')
sub.prop(self, 'joint_Axis_limit_y_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Axis_limit_z', toggle=True)
sub.prop(self, 'joint_Axis_limit_z_lower')
sub.prop(self, 'joint_Axis_limit_z_upper')
#col = self.layout.column(align=True)
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Angle_limit_x', toggle=True)
sub.prop(self, 'joint_Angle_limit_x_lower')
sub.prop(self, 'joint_Angle_limit_x_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Angle_limit_y', toggle=True)
sub.prop(self, 'joint_Angle_limit_y_lower')
sub.prop(self, 'joint_Angle_limit_y_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Angle_limit_z', toggle=True)
sub.prop(self, 'joint_Angle_limit_z_lower')
sub.prop(self, 'joint_Angle_limit_z_upper')
#col = self.layout.column(align=True)
col.label(text="Springs:")
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_use_spring_x', toggle=True)
sub.prop(self, 'joint_spring_stiffness_x')
sub.prop(self, 'joint_spring_damping_x')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_use_spring_y', toggle=True)
sub.prop(self, 'joint_spring_stiffness_y')
sub.prop(self, 'joint_spring_damping_y')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_use_spring_z', toggle=True)
sub.prop(self, 'joint_spring_stiffness_z')
sub.prop(self, 'joint_spring_damping_z')
###
def execute(self, context):
if context.scene.rigidbody_world is None:
            self.report({'INFO'}, 'Failed. The current scene has no Rigidbody World')
return {'CANCELLED'}
###selected Armature
ob = context.active_object
#self.report({'INFO'}, ob.data)
spb = context.selected_pose_bones
### Apply Object transform
bpy.ops.object.mode_set(mode='OBJECT')
params = self
for selected_bone in spb:
#self.report({'INFO'}, str(selected_bone.vector[0]))
###Create Empty Sphere
bpy.ops.object.empty_add(type='ARROWS', location=ob.matrix_world @ selected_bone.matrix @ selected_bone.head)
jc = context.active_object
if jc is None:
                self.report({'INFO'}, 'Rigid body creation failed. Verify a Rigidbody World exists and set the current collection to the Rigidbody World collection')
return {'CANCELLED'}
jc.name = "joint." + ob.name + "." + selected_bone.name
jc.show_in_front = True
jc.rotation_mode = 'QUATERNION'
if params.joint_align_bone:
align_obj_to_bone(jc, ob, selected_bone.name)
### Rigid Body Dimensions
context.object.empty_display_size = selected_bone.length * params.joint_size
### Set Rigid Body Constraint
bpy.ops.rigidbody.constraint_add()
context.object.rigid_body_constraint.type = params.joint_type
context.object.rigid_body_constraint.use_breaking = False
context.object.rigid_body_constraint.use_override_solver_iterations = True
context.object.rigid_body_constraint.breaking_threshold = 10
context.object.rigid_body_constraint.solver_iterations = 10
context.object.rigid_body_constraint.use_limit_lin_x = params.joint_Axis_limit_x
context.object.rigid_body_constraint.use_limit_lin_y = params.joint_Axis_limit_y
context.object.rigid_body_constraint.use_limit_lin_z = params.joint_Axis_limit_z
context.object.rigid_body_constraint.limit_lin_x_lower = params.joint_Axis_limit_x_lower
context.object.rigid_body_constraint.limit_lin_y_lower = params.joint_Axis_limit_y_lower
context.object.rigid_body_constraint.limit_lin_z_lower = params.joint_Axis_limit_z_lower
context.object.rigid_body_constraint.limit_lin_x_upper = params.joint_Axis_limit_x_upper
context.object.rigid_body_constraint.limit_lin_y_upper = params.joint_Axis_limit_y_upper
context.object.rigid_body_constraint.limit_lin_z_upper = params.joint_Axis_limit_z_upper
context.object.rigid_body_constraint.use_limit_ang_x = params.joint_Angle_limit_x
context.object.rigid_body_constraint.use_limit_ang_y = params.joint_Angle_limit_y
context.object.rigid_body_constraint.use_limit_ang_z = params.joint_Angle_limit_z
context.object.rigid_body_constraint.limit_ang_x_lower = params.joint_Angle_limit_x_lower
context.object.rigid_body_constraint.limit_ang_y_lower = params.joint_Angle_limit_y_lower
context.object.rigid_body_constraint.limit_ang_z_lower = params.joint_Angle_limit_z_lower
context.object.rigid_body_constraint.limit_ang_x_upper = params.joint_Angle_limit_x_upper
context.object.rigid_body_constraint.limit_ang_y_upper = params.joint_Angle_limit_y_upper
context.object.rigid_body_constraint.limit_ang_z_upper = params.joint_Angle_limit_z_upper
context.object.rigid_body_constraint.use_spring_x = params.joint_use_spring_x
context.object.rigid_body_constraint.use_spring_y = params.joint_use_spring_y
context.object.rigid_body_constraint.use_spring_z = params.joint_use_spring_z
context.object.rigid_body_constraint.spring_stiffness_x = params.joint_spring_stiffness_x
context.object.rigid_body_constraint.spring_stiffness_y = params.joint_spring_stiffness_y
context.object.rigid_body_constraint.spring_stiffness_z = params.joint_spring_stiffness_z
context.object.rigid_body_constraint.spring_damping_x = params.joint_spring_damping_x
context.object.rigid_body_constraint.spring_damping_y = params.joint_spring_damping_y
context.object.rigid_body_constraint.spring_damping_z = params.joint_spring_damping_z
###constraint.object
if selected_bone.parent:
rbname = "rb." + ob.name + "." + selected_bone.parent.name
if rbname in context.view_layer.objects:
context.object.rigid_body_constraint.object1 = context.view_layer.objects[rbname]
rbname = "rb." + ob.name + "." + selected_bone.name
if rbname in context.view_layer.objects:
context.object.rigid_body_constraint.object2 = context.view_layer.objects[rbname]
###clear object select
context.view_layer.objects.active = ob
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='DESELECT')
self.report({'INFO'}, "OK")
return {'FINISHED'}
class AddActiveNJointOperator(bpy.types.Operator):
bl_idname = "genrigidbodies.addactivenjoint"
bl_label = "Add Actives & Joints"
bl_description = "Make active rigid bodies & constraints"
bl_options = {'REGISTER', 'UNDO'}
tr_size = 0.5
###instance UProp.rigidbody
p_rb_shape : UProp.rb_shape
p_rb_radius : UProp.rb_radius
p_rb_length : UProp.rb_length
p_rb_inset_capsule : UProp.rb_inset_capsule
p_rb_dim : UProp.rb_dim
p_rb_mass : UProp.rb_mass
p_rb_friction : UProp.rb_friction
p_rb_bounciness : UProp.rb_bounciness
p_rb_translation : UProp.rb_translation
p_rb_rotation : UProp.rb_rotation
p_rb_add_pole_rootbody : UProp.rb_add_pole_rootbody
p_rb_pole_rootbody_dim : UProp.rb_pole_rootbody_dim
###instance UProp.joint
joint_type : UProp.jo_type
joint_size : UProp.jo_size
joint_align_bone : UProp.jo_align_bone
joint_Axis_limit_x : UProp.jo_limit_lin_x
joint_Axis_limit_y : UProp.jo_limit_lin_y
joint_Axis_limit_z : UProp.jo_limit_lin_z
joint_Axis_limit_x_lower : UProp.jo_limit_lin_x_lower
joint_Axis_limit_y_lower : UProp.jo_limit_lin_y_lower
joint_Axis_limit_z_lower : UProp.jo_limit_lin_z_lower
joint_Axis_limit_x_upper : UProp.jo_limit_lin_x_upper
joint_Axis_limit_y_upper : UProp.jo_limit_lin_y_upper
joint_Axis_limit_z_upper : UProp.jo_limit_lin_z_upper
joint_Angle_limit_x : UProp.jo_limit_ang_x
joint_Angle_limit_y : UProp.jo_limit_ang_y
joint_Angle_limit_z : UProp.jo_limit_ang_z
joint_Angle_limit_x_lower : UProp.jo_limit_ang_x_lower
joint_Angle_limit_y_lower : UProp.jo_limit_ang_y_lower
joint_Angle_limit_z_lower : UProp.jo_limit_ang_z_lower
joint_Angle_limit_x_upper : UProp.jo_limit_ang_x_upper
joint_Angle_limit_y_upper : UProp.jo_limit_ang_y_upper
joint_Angle_limit_z_upper : UProp.jo_limit_ang_z_upper
joint_use_spring_x : UProp.jo_use_spring_x
joint_use_spring_y : UProp.jo_use_spring_y
joint_use_spring_z : UProp.jo_use_spring_z
joint_spring_stiffness_x : UProp.jo_spring_stiffness_x
joint_spring_stiffness_y : UProp.jo_spring_stiffness_y
joint_spring_stiffness_z : UProp.jo_spring_stiffness_z
joint_spring_damping_x : UProp.jo_spring_damping_x
joint_spring_damping_y : UProp.jo_spring_damping_y
joint_spring_damping_z : UProp.jo_spring_damping_z
def draw(self, context):
###Rigid Body Object
box = self.layout.box()
box.prop(self, 'p_rb_shape')
if self.p_rb_shape in ('CONE', 'CYLINDER', 'CAPSULE', 'SPHERE'):
box.prop(self, 'p_rb_radius')
box.prop(self, 'p_rb_length')
if self.p_rb_shape == 'CAPSULE':
box.prop(self, 'p_rb_inset_capsule')
else:
box.prop(self, 'p_rb_dim')
box.prop(self, 'p_rb_mass')
box.prop(self, 'p_rb_friction')
box.prop(self, 'p_rb_bounciness')
box.prop(self, 'p_rb_translation')
box.prop(self, 'p_rb_rotation')
#Joint Object
box = self.layout.box()
box.prop(self, 'joint_type')
box.prop(self, 'joint_size')
box.prop(self, 'joint_align_bone')
box.prop(self, 'p_rb_add_pole_rootbody')
col = box.column(align=True)
col.label(text="Limits:")
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Axis_limit_x', toggle=True)
sub.prop(self, 'joint_Axis_limit_x_lower')
sub.prop(self, 'joint_Axis_limit_x_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Axis_limit_y', toggle=True)
sub.prop(self, 'joint_Axis_limit_y_lower')
sub.prop(self, 'joint_Axis_limit_y_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Axis_limit_z', toggle=True)
sub.prop(self, 'joint_Axis_limit_z_lower')
sub.prop(self, 'joint_Axis_limit_z_upper')
#col = self.layout.column(align=True)
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Angle_limit_x', toggle=True)
sub.prop(self, 'joint_Angle_limit_x_lower')
sub.prop(self, 'joint_Angle_limit_x_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Angle_limit_y', toggle=True)
sub.prop(self, 'joint_Angle_limit_y_lower')
sub.prop(self, 'joint_Angle_limit_y_upper')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_Angle_limit_z', toggle=True)
sub.prop(self, 'joint_Angle_limit_z_lower')
sub.prop(self, 'joint_Angle_limit_z_upper')
#col = self.layout.column(align=True)
col.label(text="Springs:")
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_use_spring_x', toggle=True)
sub.prop(self, 'joint_spring_stiffness_x')
sub.prop(self, 'joint_spring_damping_x')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_use_spring_y', toggle=True)
sub.prop(self, 'joint_spring_stiffness_y')
sub.prop(self, 'joint_spring_damping_y')
row = col.row(align=True)
sub = row.row(align=True)
#sub.alignment = 'EXPAND'
sub.prop(self, 'joint_use_spring_z', toggle=True)
sub.prop(self, 'joint_spring_stiffness_z')
sub.prop(self, 'joint_spring_damping_z')
#
def execute(self, context):
###selected Armature
ob = context.active_object
#self.report({'INFO'}, "ob:" + str(ob))
spb = context.selected_pose_bones
bpy.ops.object.mode_set(mode='OBJECT')
params = self
rb_dict = {}
###Rigid Body Session
for selected_bone in spb:
#self.report({'INFO'}, str(selected_bone.vector[0]))
###Create Rigidbody Cube
bpy.ops.mesh.primitive_cube_add(size=1, location=ob.matrix_world @ selected_bone.center)
rc = context.active_object
if rc is None:
                self.report({'INFO'}, 'Rigid body creation failed. Verify a Rigidbody World exists and set the current collection to the Rigidbody World collection')
return {'CANCELLED'}
rc.name = "rb." + ob.name + "." + selected_bone.name
rc.rotation_mode = 'QUATERNION'
rc.show_in_front = True
rc.display.show_shadows = False
rc.display_type = 'BOUNDS'
rc.hide_render = True
rc.show_bounds = True
rc.display_bounds_type = params.p_rb_shape
rb_dict[selected_bone] = rc
            ### Align to Bone
align_rb_ort_to_bone(rc, ob, selected_bone.name)
### Rigid Body Dimensions
            set_dimensions(context, params, selected_bone)
### Scale Apply
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
### Set Rigid Body
bpy.ops.rigidbody.object_add()
context.object.rigid_body.type = "ACTIVE"
context.object.rigid_body.collision_shape = params.p_rb_shape
context.object.rigid_body.mass = params.p_rb_mass
context.object.rigid_body.friction = params.p_rb_friction
context.object.rigid_body.restitution = params.p_rb_bounciness
context.object.rigid_body.linear_damping = params.p_rb_translation
context.object.rigid_body.angular_damping = params.p_rb_rotation
## Make Track offset point
bpy.ops.object.empty_add(type='ARROWS')
tr = context.active_object
tr.name = "tr." + ob.name + "." + selected_bone.name
tr.empty_display_size = selected_bone.length * params.joint_size * self.tr_size
tr.rotation_mode = 'QUATERNION'
### Align track object to bone
align_obj_to_bone(tr, ob, selected_bone.name)
tr.parent = rc
tr.matrix_parent_inverse = rc.matrix_world.inverted()
if selected_bone.parent is not None and selected_bone.parent not in spb and selected_bone.parent not in rb_dict:
if "rb." + ob.name + "." + selected_bone.parent.name in context.view_layer.objects:
rb_dict[selected_bone.parent] = context.view_layer.objects["rb." + ob.name + "." + selected_bone.parent.name]
elif params.p_rb_add_pole_rootbody:
###Create Rigidbody Cube
bpy.ops.mesh.primitive_cube_add(size=1, location=ob.matrix_world @ selected_bone.matrix @ selected_bone.head)
rc = context.active_object
rc.name = "rb.pole." + ob.name + "." + selected_bone.parent.name
rc.rotation_mode = 'QUATERNION'
rc.show_in_front = True
rc.display.show_shadows = False
rc.hide_render = True
rc.display_type = 'BOUNDS'
rc.show_bounds = True
rc.display_bounds_type = 'BOX'
rb_dict[selected_bone.parent] = rc
### Rigid Body Dimensions
context.object.dimensions = [
selected_bone.parent.length * params.p_rb_pole_rootbody_dim[0],
selected_bone.parent.length * params.p_rb_pole_rootbody_dim[1],
selected_bone.parent.length * params.p_rb_pole_rootbody_dim[2]
]
### Scale Apply
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
### Set Rigid Body
bpy.ops.rigidbody.object_add()
rc.rigid_body.type = "PASSIVE"
rc.rigid_body.collision_shape = "BOX"
#rc.rigid_body.collision_shape = params.p_rb_shape
#rc.rigid_body.mass = params.p_rb_mass
#rc.rigid_body.friction = params.p_rb_friction
#rc.rigid_body.restitution = params.p_rb_bounciness
#rc.rigid_body.linear_damping = params.p_rb_translation
#rc.rigid_body.angular_damping = params.p_rb_rotation
rc.rigid_body.kinematic = True
### Child OF
CoC = rc.constraints.new('CHILD_OF')
CoC.name = 'Child_Of_' + selected_bone.parent.name
CoC.target = ob
CoC.subtarget = selected_bone.parent.name
                    # set the Child Of constraint's inverse matrix without bpy.ops (childof_set_inverse)
sub_target = bpy.data.objects[ob.name].pose.bones[selected_bone.parent.name]
#self.report({'INFO'}, str(sub_target))
CoC.inverse_matrix = (ob.matrix_world @ sub_target.matrix).inverted()
#context.view_layer.update()
print('Joint Session')
###Joint Session
for selected_bone in spb:
#self.report({'INFO'}, str(selected_bone.vector[0]))
if selected_bone in rb_dict:
###Create Joint Empty
bpy.ops.object.empty_add(type='ARROWS', location=ob.matrix_world @ selected_bone.matrix @ selected_bone.head)
jc = context.active_object
jc.name = "joint." + ob.name + "." + selected_bone.name
jc.show_in_front = True
jc.rotation_mode = 'QUATERNION'
if params.joint_align_bone:
align_obj_to_bone(jc, ob, selected_bone.name)
### Set Joint radius
context.object.empty_display_size = selected_bone.length * params.joint_size
### Set Rigid Body Constraint
bpy.ops.rigidbody.constraint_add()
if selected_bone.parent in rb_dict:
jc.rigid_body_constraint.object1 = rb_dict[selected_bone.parent]
jc.rigid_body_constraint.object2 = rb_dict[selected_bone]
jc.rigid_body_constraint.type = params.joint_type
jc.rigid_body_constraint.use_breaking = False
jc.rigid_body_constraint.use_override_solver_iterations = True
jc.rigid_body_constraint.breaking_threshold = 10
jc.rigid_body_constraint.solver_iterations = 10
jc.rigid_body_constraint.use_limit_lin_x = params.joint_Axis_limit_x
jc.rigid_body_constraint.use_limit_lin_y = params.joint_Axis_limit_y
jc.rigid_body_constraint.use_limit_lin_z = params.joint_Axis_limit_z
jc.rigid_body_constraint.limit_lin_x_lower = params.joint_Axis_limit_x_lower
jc.rigid_body_constraint.limit_lin_y_lower = params.joint_Axis_limit_y_lower
jc.rigid_body_constraint.limit_lin_z_lower = params.joint_Axis_limit_z_lower
jc.rigid_body_constraint.limit_lin_x_upper = params.joint_Axis_limit_x_upper
jc.rigid_body_constraint.limit_lin_y_upper = params.joint_Axis_limit_y_upper
jc.rigid_body_constraint.limit_lin_z_upper = params.joint_Axis_limit_z_upper
jc.rigid_body_constraint.use_limit_ang_x = params.joint_Angle_limit_x
jc.rigid_body_constraint.use_limit_ang_y = params.joint_Angle_limit_y
jc.rigid_body_constraint.use_limit_ang_z = params.joint_Angle_limit_z
jc.rigid_body_constraint.limit_ang_x_lower = params.joint_Angle_limit_x_lower
jc.rigid_body_constraint.limit_ang_y_lower = params.joint_Angle_limit_y_lower
jc.rigid_body_constraint.limit_ang_z_lower = params.joint_Angle_limit_z_lower
jc.rigid_body_constraint.limit_ang_x_upper = params.joint_Angle_limit_x_upper
jc.rigid_body_constraint.limit_ang_y_upper = params.joint_Angle_limit_y_upper
jc.rigid_body_constraint.limit_ang_z_upper = params.joint_Angle_limit_z_upper
jc.rigid_body_constraint.use_spring_x = params.joint_use_spring_x
jc.rigid_body_constraint.use_spring_y = params.joint_use_spring_y
jc.rigid_body_constraint.use_spring_z = params.joint_use_spring_z
jc.rigid_body_constraint.spring_stiffness_x = params.joint_spring_stiffness_x
jc.rigid_body_constraint.spring_stiffness_y = params.joint_spring_stiffness_y
jc.rigid_body_constraint.spring_stiffness_z = params.joint_spring_stiffness_z
jc.rigid_body_constraint.spring_damping_x = params.joint_spring_damping_x
jc.rigid_body_constraint.spring_damping_y = params.joint_spring_damping_y
jc.rigid_body_constraint.spring_damping_z = params.joint_spring_damping_z
context.view_layer.objects.active = ob
###bone's use_connect turn to false
bpy.ops.object.mode_set(mode='EDIT')
for selected_bone in spb:
ob.data.edit_bones[selected_bone.name].use_connect = False
### Set Copy Transform Constraint To Bone
bpy.ops.object.mode_set(mode='POSE')
for selected_bone in spb:
#bpy.ops.pose.armature_apply()
ab = selected_bone
tr = bpy.data.objects["tr." + ob.name + "." + selected_bone.name]
#self.report({'INFO'}, str(rc.name))
con = ab.constraints.new('COPY_TRANSFORMS')
#self.report({'INFO'}, "info:" + str(CoC))
con.name = 'Copy Transforms Of ' + tr.name
con.target = tr
###clear object select
context.view_layer.objects.active = ob
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='DESELECT')
self.report({'INFO'}, "OK")
return {'FINISHED'}
class ReparentOrphanTrackObjectOperator(bpy.types.Operator):
bl_idname = "genrigidbodies.reparent_orphan_track_object"
bl_label = "Reparent Orphan Track Object"
bl_description = "Parent unparented 'tr.' object to corresponding 'rb.' object by keep transforming parenting."
bl_options = {'UNDO'}
def execute(self, context):
print(context.view_layer.objects)
for i in context.selected_objects:
if i.name.startswith("tr."):
correspondName = 'rb' + i.name[2:]
print(correspondName)
if correspondName in context.view_layer.objects:
print('parent')
parentObject = context.view_layer.objects[correspondName]
i.parent = parentObject
i.matrix_parent_inverse = parentObject.matrix_world.inverted()
return {'FINISHED'}
class ForceCorrespondNameRBAndTrackObjectOperator(bpy.types.Operator):
bl_idname = "genrigidbodies.force_correspond_name_rb_n_tr"
bl_label = "Repair Corresponding"
bl_description = "If 'tr.' object's parent 'rb.' object has non-corresponding name, rename it."
bl_options = {'UNDO'}
def execute(self, context):
print(context.view_layer.objects)
for i in context.selected_objects:
if i.name.startswith("tr.") and i.parent and i.parent.name.startswith("rb."):
correspondName = 'rb' + i.name[2:]
print(correspondName)
if correspondName != i.parent.name:
print('rename')
i.parent.name = correspondName
return {'FINISHED'}
class ConnectOperator(bpy.types.Operator):
bl_idname = "genrigidbodies.connect"
bl_label = "Connect Rigid Body Constraint"
bl_description = "Set selected objects' 'Objects' paratemter of rigid body constraint to active object."
bl_options = {'UNDO'}
def execute(self, context):
for i in context.selected_objects:
if i != context.active_object and i.rigid_body_constraint:
i.rigid_body_constraint.object1 = context.active_object
return {'FINISHED'}
# utils
def set_dimensions(context, params, selected_bone):
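    # Editor's comment (assumption): for box shapes the Y and Z components of
    # p_rb_dim are swapped below, presumably to compensate for the -90° X
    # rotation applied in align_rb_ort_to_bone so the box follows the bone axis.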
if params.p_rb_shape in ('CONE', 'CYLINDER', 'CAPSULE', 'SPHERE'):
if params.p_rb_shape == 'CAPSULE' and not params.p_rb_inset_capsule:
context.object.dimensions = [
selected_bone.length * params.p_rb_radius,
selected_bone.length * params.p_rb_radius,
selected_bone.length * (params.p_rb_length + params.p_rb_radius)
]
else:
context.object.dimensions = [
selected_bone.length * params.p_rb_radius,
selected_bone.length * params.p_rb_radius,
selected_bone.length * params.p_rb_length
]
else:
context.object.dimensions = [
selected_bone.length * params.p_rb_dim[0],
selected_bone.length * params.p_rb_dim[2],
selected_bone.length * params.p_rb_dim[1]
]
def align_obj_to_bone(obj, armature, bone_name):
bone = armature.data.bones[bone_name]
mat = armature.matrix_world @ bone.matrix_local
obj.location = mat.to_translation()
obj.rotation_mode = 'QUATERNION'
obj.rotation_quaternion = mat.to_quaternion()
def align_rb_ort_to_bone(obj, armature, bone_name):
bone = armature.data.bones[bone_name]
mat = armature.matrix_world @ bone.matrix_local @ mathutils.Matrix.Rotation(math.radians(-90.0), 4, 'X')
obj.rotation_mode = 'QUATERNION'
obj.rotation_quaternion = mat.to_quaternion()
# add menu
register, unregister = bpy.utils.register_classes_factory((
AddPassiveOperator,
AddActiveOperator,
AddJointOperator,
AddActiveNJointOperator,
ReparentOrphanTrackObjectOperator,
ForceCorrespondNameRBAndTrackObjectOperator,
ConnectOperator,
PoseMenu,
ObjectMenu,
))
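# Editor's sketch (an assumed convenience, not part of the original file):
# running this file from Blender's text editor registers the operators and
# menus directly, the usual pattern for testing a single-file add-on.
if __name__ == "__main__":
    register()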
| 39.915052
| 145
| 0.61455
| 6,520
| 53,566
| 4.736503
| 0.055982
| 0.038469
| 0.045528
| 0.041319
| 0.820446
| 0.787676
| 0.758921
| 0.724564
| 0.667703
| 0.645684
| 0
| 0.004858
| 0.289083
| 53,566
| 1,341
| 146
| 39.944817
| 0.806045
| 0.053
| 0
| 0.586502
| 1
| 0.000951
| 0.137868
| 0.025121
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020913
| false
| 0.01711
| 0.003802
| 0
| 0.212928
| 0.006654
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0bd9d8d1f62c8f316f81b01c83f5c18f64ca852d
| 3,598
|
py
|
Python
|
tests/utmp.py
|
jleaniz/dtformats
|
03c29442c6a98bd4998314fca7274572ea848a84
|
[
"Apache-2.0"
] | 61
|
2017-08-30T11:13:17.000Z
|
2022-03-24T20:45:18.000Z
|
tests/utmp.py
|
jleaniz/dtformats
|
03c29442c6a98bd4998314fca7274572ea848a84
|
[
"Apache-2.0"
] | 12
|
2017-05-01T10:22:49.000Z
|
2022-02-11T05:51:18.000Z
|
tests/utmp.py
|
jleaniz/dtformats
|
03c29442c6a98bd4998314fca7274572ea848a84
|
[
"Apache-2.0"
] | 19
|
2018-08-16T09:32:07.000Z
|
2021-11-19T17:14:02.000Z
|
# -*- coding: utf-8 -*-
"""Tests for utmp files."""
import unittest
from dtformats import utmp
from tests import test_lib
class LinuxLibc6UtmpFileTest(test_lib.BaseTestCase):
"""Linux libc6 utmp file tests."""
# pylint: disable=protected-access
def testDebugPrintEntry(self):
"""Tests the _DebugPrintEntry function."""
output_writer = test_lib.TestOutputWriter()
test_file = utmp.LinuxLibc6UtmpFile(output_writer=output_writer)
data_type_map = test_file._GetDataTypeMap('linux_libc6_utmp_entry')
entry = data_type_map.CreateStructureValues(
ip_address=test_file._EMPTY_IP_ADDRESS,
exit_status=5,
hostname=b'host',
microseconds=8,
pid=2,
session=6,
terminal=b'vty',
terminal_identifier=3,
termination_status=4,
timestamp=7,
type=1,
unknown1=b'unknown',
username=b'user')
test_file._DebugPrintEntry(entry)
def testDecodeString(self):
"""Tests the _DecodeString function."""
test_file = utmp.LinuxLibc6UtmpFile()
string = test_file._DecodeString(b'test\x00')
self.assertEqual(string, 'test')
def testReadEntries(self):
"""Tests the _ReadEntries function."""
output_writer = test_lib.TestOutputWriter()
test_file = utmp.LinuxLibc6UtmpFile(output_writer=output_writer)
test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])
self._SkipIfPathNotExists(test_file_path)
with open(test_file_path, 'rb') as file_object:
test_file._ReadEntries(file_object)
def testReadFileObject(self):
"""Tests the ReadFileObject."""
output_writer = test_lib.TestOutputWriter()
test_file = utmp.LinuxLibc6UtmpFile(debug=True, output_writer=output_writer)
test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])
self._SkipIfPathNotExists(test_file_path)
test_file.Open(test_file_path)
class MacOSXUtmpxFileTest(test_lib.BaseTestCase):
"""Mac OS X 10.5 utmpx file tests."""
# pylint: disable=protected-access
def testDebugPrintEntry(self):
"""Tests the _DebugPrintEntry function."""
output_writer = test_lib.TestOutputWriter()
test_file = utmp.MacOSXUtmpxFile(output_writer=output_writer)
data_type_map = test_file._GetDataTypeMap('macosx_utmpx_entry')
entry = data_type_map.CreateStructureValues(
hostname=b'host',
microseconds=1,
pid=2,
terminal=b'vty',
terminal_identifier=3,
timestamp=4,
type=5,
unknown1=6,
unknown2=b'unknown',
username=b'user')
test_file._DebugPrintEntry(entry)
def testDecodeString(self):
"""Tests the _DecodeString function."""
test_file = utmp.MacOSXUtmpxFile()
string = test_file._DecodeString(b'test\x00')
self.assertEqual(string, 'test')
def testReadEntries(self):
"""Tests the _ReadEntries function."""
output_writer = test_lib.TestOutputWriter()
test_file = utmp.MacOSXUtmpxFile(output_writer=output_writer)
test_file_path = self._GetTestFilePath(['utmpx-macosx10.5'])
self._SkipIfPathNotExists(test_file_path)
with open(test_file_path, 'rb') as file_object:
test_file._ReadEntries(file_object)
def testReadFileObject(self):
"""Tests the ReadFileObject."""
output_writer = test_lib.TestOutputWriter()
test_file = utmp.MacOSXUtmpxFile(debug=True, output_writer=output_writer)
test_file_path = self._GetTestFilePath(['utmpx-macosx10.5'])
self._SkipIfPathNotExists(test_file_path)
test_file.Open(test_file_path)
if __name__ == '__main__':
unittest.main()
| 28.555556
| 80
| 0.712618
| 412
| 3,598
| 5.915049
| 0.237864
| 0.101764
| 0.059089
| 0.046779
| 0.808781
| 0.808781
| 0.748872
| 0.748872
| 0.748872
| 0.73533
| 0
| 0.01355
| 0.179544
| 3,598
| 125
| 81
| 28.784
| 0.811992
| 0.119789
| 0
| 0.657895
| 0
| 0
| 0.05661
| 0.007076
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.105263
| false
| 0
| 0.039474
| 0
| 0.171053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04051ffeeaeb6bc8d951d79951e14ed8c0725a58
| 40
|
py
|
Python
|
hnms/__init__.py
|
microsoft/hnms
|
c7368cd9079fe576a61cdf4e1872b326485f2464
|
[
"MIT"
] | 30
|
2020-05-26T01:33:05.000Z
|
2020-10-21T23:50:31.000Z
|
hnms/__init__.py
|
microsoft/hnms
|
c7368cd9079fe576a61cdf4e1872b326485f2464
|
[
"MIT"
] | 1
|
2020-05-31T12:18:09.000Z
|
2020-05-31T14:09:05.000Z
|
hnms/__init__.py
|
microsoft/hnms
|
c7368cd9079fe576a61cdf4e1872b326485f2464
|
[
"MIT"
] | 5
|
2020-05-26T02:11:47.000Z
|
2021-11-10T08:33:32.000Z
|
from .multi_hnms import MultiHNMS, HNMS
| 20
| 39
| 0.825
| 6
| 40
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 40
| 1
| 40
| 40
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
041a9fa62dc55ae70593d44254261599b82add98
| 341
|
py
|
Python
|
pipeline/__init__.py
|
cnwarden/mico2
|
017c52ef4626649d162904f247f21eec768fafc3
|
[
"MIT"
] | null | null | null |
pipeline/__init__.py
|
cnwarden/mico2
|
017c52ef4626649d162904f247f21eec768fafc3
|
[
"MIT"
] | null | null | null |
pipeline/__init__.py
|
cnwarden/mico2
|
017c52ef4626649d162904f247f21eec768fafc3
|
[
"MIT"
] | null | null | null |
# coding:utf-8
import elasticsearch
import pykafka
from pipeline.pipeline_manager import PipelineManager
from pipeline.pipelines import ESPipeline, KafkaPipeline, StorePipeline, SimplePipeline, SaveToDictPipeline
__all__ = ['PipelineManager', 'ESPipeline', 'KafkaPipeline', 'StorePipeline', 'SimplePipeline', 'SaveToDictPipeline']
| 37.888889
| 118
| 0.809384
| 29
| 341
| 9.344828
| 0.586207
| 0.088561
| 0.265683
| 0.369004
| 0.501845
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003279
| 0.105572
| 341
| 8
| 119
| 42.625
| 0.885246
| 0.035191
| 0
| 0
| 0
| 0
| 0.260188
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04362d1c0bc89f03b2612adaea14f8dfb97bce44
| 2,069
|
py
|
Python
|
groupdocs_comparison_cloud/models/__init__.py
|
groupdocs-comparison-cloud/groupdocs-comparison-cloud-python
|
f970b22fae7a791d07b756c2d418217fd368c289
|
[
"MIT"
] | null | null | null |
groupdocs_comparison_cloud/models/__init__.py
|
groupdocs-comparison-cloud/groupdocs-comparison-cloud-python
|
f970b22fae7a791d07b756c2d418217fd368c289
|
[
"MIT"
] | null | null | null |
groupdocs_comparison_cloud/models/__init__.py
|
groupdocs-comparison-cloud/groupdocs-comparison-cloud-python
|
f970b22fae7a791d07b756c2d418217fd368c289
|
[
"MIT"
] | 1
|
2021-02-02T18:41:48.000Z
|
2021-02-02T18:41:48.000Z
|
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models
from groupdocs_comparison_cloud.models.apply_revisions_options import ApplyRevisionsOptions
from groupdocs_comparison_cloud.models.change_info import ChangeInfo
from groupdocs_comparison_cloud.models.comparison_options import ComparisonOptions
from groupdocs_comparison_cloud.models.diagram_master_setting import DiagramMasterSetting
from groupdocs_comparison_cloud.models.disc_usage import DiscUsage
from groupdocs_comparison_cloud.models.error import Error
from groupdocs_comparison_cloud.models.error_details import ErrorDetails
from groupdocs_comparison_cloud.models.file_info import FileInfo
from groupdocs_comparison_cloud.models.file_versions import FileVersions
from groupdocs_comparison_cloud.models.files_list import FilesList
from groupdocs_comparison_cloud.models.files_upload_result import FilesUploadResult
from groupdocs_comparison_cloud.models.format import Format
from groupdocs_comparison_cloud.models.formats_result import FormatsResult
from groupdocs_comparison_cloud.models.info_result import InfoResult
from groupdocs_comparison_cloud.models.items_style import ItemsStyle
from groupdocs_comparison_cloud.models.link import Link
from groupdocs_comparison_cloud.models.metadata import Metadata
from groupdocs_comparison_cloud.models.object_exist import ObjectExist
from groupdocs_comparison_cloud.models.page_info import PageInfo
from groupdocs_comparison_cloud.models.rectangle import Rectangle
from groupdocs_comparison_cloud.models.revision_info import RevisionInfo
from groupdocs_comparison_cloud.models.settings import Settings
from groupdocs_comparison_cloud.models.size import Size
from groupdocs_comparison_cloud.models.storage_exist import StorageExist
from groupdocs_comparison_cloud.models.storage_file import StorageFile
from groupdocs_comparison_cloud.models.style_change_info import StyleChangeInfo
from groupdocs_comparison_cloud.models.file_version import FileVersion
from groupdocs_comparison_cloud.models.updates_options import UpdatesOptions
| 59.114286
| 91
| 0.908652
| 261
| 2,069
| 6.873563
| 0.260536
| 0.202899
| 0.358974
| 0.437012
| 0.556299
| 0.19621
| 0
| 0
| 0
| 0
| 0
| 0.00103
| 0.061382
| 2,069
| 34
| 92
| 60.852941
| 0.92276
| 0.019333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
044fcca08ab99ca3809d45e924f55259da622082
| 30
|
py
|
Python
|
Grapple/__init__.py
|
elctrc/grapple
|
0131bd63711a0695b2fa5f17464655083404b97f
|
[
"Apache-2.0"
] | 3
|
2021-06-16T15:49:57.000Z
|
2021-09-01T16:52:15.000Z
|
Grapple/__init__.py
|
elctrc/grapple
|
0131bd63711a0695b2fa5f17464655083404b97f
|
[
"Apache-2.0"
] | null | null | null |
Grapple/__init__.py
|
elctrc/grapple
|
0131bd63711a0695b2fa5f17464655083404b97f
|
[
"Apache-2.0"
] | null | null | null |
from .grapple import HayLoader
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f08ed7656c5c84be93bc864aec551a09b31e4dd3
| 164
|
py
|
Python
|
emulator/script2.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 6
|
2019-01-09T11:55:15.000Z
|
2021-06-25T19:52:42.000Z
|
emulator/script2.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 65
|
2018-12-12T08:40:38.000Z
|
2022-02-28T09:19:45.000Z
|
emulator/script2.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 9
|
2018-11-23T08:59:09.000Z
|
2020-02-04T12:56:35.000Z
|
#input_column: b,string,VARCHAR(100),100,None,None
#input_type: SET
#output_column: b,string,VARCHAR(100),100,None,None
#output_type: EMITS
#!/bin/bash
ls -l /tmp
| 20.5
| 51
| 0.75
| 29
| 164
| 4.103448
| 0.551724
| 0.117647
| 0.218487
| 0.336134
| 0.571429
| 0.571429
| 0.571429
| 0.571429
| 0
| 0
| 0
| 0.07947
| 0.079268
| 164
| 7
| 52
| 23.428571
| 0.708609
| 0.865854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0fd8f98ada3234e49c76eb4884461094bb94f54
| 93
|
py
|
Python
|
terrascript/vcd/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 4
|
2022-02-07T21:08:14.000Z
|
2022-03-03T04:41:28.000Z
|
terrascript/vcd/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/vcd/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 2
|
2022-02-06T01:49:42.000Z
|
2022-02-08T14:15:00.000Z
|
# terrascript/vcd/__init__.py
import terrascript
class vcd(terrascript.Provider):
pass
| 13.285714
| 32
| 0.774194
| 11
| 93
| 6.181818
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139785
| 93
| 6
| 33
| 15.5
| 0.85
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
9bd15d570c4edfc4bfb31a2706ea18aa04efa9f5
| 137
|
py
|
Python
|
tests/job_scrapper.py
|
DannyMcwaves/ATS
|
91327ce15b4c4ea2fffebf02562cb8095b7983ec
|
[
"BSD-3-Clause"
] | null | null | null |
tests/job_scrapper.py
|
DannyMcwaves/ATS
|
91327ce15b4c4ea2fffebf02562cb8095b7983ec
|
[
"BSD-3-Clause"
] | 4
|
2020-06-05T17:38:46.000Z
|
2022-03-02T14:54:30.000Z
|
tests/job_scrapper.py
|
DannyMcwaves/ATS
|
91327ce15b4c4ea2fffebf02562cb8095b7983ec
|
[
"BSD-3-Clause"
] | null | null | null |
from job_scraper import run
url = 'https://stackoverflow.com/jobs/139474/full-stack-developer-with-a-passion-for-borderguru'
run(url)
| 19.571429
| 96
| 0.781022
| 21
| 137
| 5.047619
| 0.904762
| 0.113208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.080292
| 137
| 6
| 97
| 22.833333
| 0.793651
| 0
| 0
| 0
| 0
| 0.333333
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
ac969f51247bccae56dbcd467866fade7d7c0225
| 334
|
py
|
Python
|
Beta/Never gonna give you up.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
Beta/Never gonna give you up.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
Beta/Never gonna give you up.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
def music(numbers):
s="Never gonna give you up\nNever gonna let you down\nNever gonna run around and desert you\nNever gonna make you cry\nNever gonna say goodbye\nNever gonna tell a lie and hurt you".split("\n")
return [s[val].replace("Never gonna", "NEVER GONNA") if index%2 else s[val] for index, val in enumerate(numbers)]
| 111.333333
| 196
| 0.739521
| 60
| 334
| 4.116667
| 0.6
| 0.222672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003546
| 0.155689
| 334
| 3
| 197
| 111.333333
| 0.87234
| 0
| 0
| 0
| 0
| 0.333333
| 0.597015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
ac9e0e0b5801506d8f955bb2b8721ba557314363
| 27
|
py
|
Python
|
hello.py
|
Feister31/Election-Analysis
|
5884c906bcc12bcc682f0ed930d72f06a2a090d8
|
[
"MIT"
] | null | null | null |
hello.py
|
Feister31/Election-Analysis
|
5884c906bcc12bcc682f0ed930d72f06a2a090d8
|
[
"MIT"
] | null | null | null |
hello.py
|
Feister31/Election-Analysis
|
5884c906bcc12bcc682f0ed930d72f06a2a090d8
|
[
"MIT"
] | null | null | null |
print("election_analysis")
| 13.5
| 26
| 0.814815
| 3
| 27
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 27
| 1
| 27
| 27
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
acc0b6df2b6aceb61444b8085229d0462e37573d
| 879
|
py
|
Python
|
terrascript/digitalocean/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/digitalocean/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/digitalocean/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/digitalocean/d.py
import terrascript
class digitalocean_certificate(terrascript.Data):
pass
class digitalocean_database_cluster(terrascript.Data):
pass
class digitalocean_domain(terrascript.Data):
pass
class digitalocean_droplet(terrascript.Data):
pass
class digitalocean_droplet_snapshot(terrascript.Data):
pass
class digitalocean_floating_ip(terrascript.Data):
pass
class digitalocean_image(terrascript.Data):
pass
class digitalocean_kubernetes_cluster(terrascript.Data):
pass
class digitalocean_loadbalancer(terrascript.Data):
pass
class digitalocean_record(terrascript.Data):
pass
class digitalocean_ssh_key(terrascript.Data):
pass
class digitalocean_tag(terrascript.Data):
pass
class digitalocean_volume_snapshot(terrascript.Data):
pass
class digitalocean_volume(terrascript.Data):
pass
| 18.702128
| 56
| 0.796359
| 96
| 879
| 7.083333
| 0.25
| 0.35
| 0.391176
| 0.458824
| 0.770588
| 0.452941
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135381
| 879
| 46
| 57
| 19.108696
| 0.894737
| 0.032992
| 0
| 0.482759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.482759
| 0.034483
| 0
| 0.517241
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
ace5700a6e1a8a4f7b4738cf6ee5f0df89d47cf7
| 42
|
py
|
Python
|
content/usr/src/app/examples/env.py
|
jerenius/Tahtoprobe
|
bce3cc439d2d63897ecbffeec820d637dc4cdb46
|
[
"MIT"
] | null | null | null |
content/usr/src/app/examples/env.py
|
jerenius/Tahtoprobe
|
bce3cc439d2d63897ecbffeec820d637dc4cdb46
|
[
"MIT"
] | null | null | null |
content/usr/src/app/examples/env.py
|
jerenius/Tahtoprobe
|
bce3cc439d2d63897ecbffeec820d637dc4cdb46
|
[
"MIT"
] | null | null | null |
import os
print(os.environ['mqttbroker'])
| 14
| 31
| 0.761905
| 6
| 42
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 2
| 32
| 21
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
aced9056be53d5cb3ab9b218adb0c1684e092e11
| 5,429
|
py
|
Python
|
tests/unit/testActionRules/testActionRules.py
|
KIZI/actionrules
|
227e021fa60ce40a1492322fe9bec35f0469e19c
|
[
"MIT"
] | 8
|
2019-10-11T09:49:20.000Z
|
2022-03-21T23:23:55.000Z
|
tests/unit/testActionRules/testActionRules.py
|
hhl60492/actionrules
|
cdd1f58b44278e033d2eed7c603938e29368c9fa
|
[
"MIT"
] | 15
|
2019-12-29T20:14:36.000Z
|
2021-12-10T13:16:00.000Z
|
tests/unit/testActionRules/testActionRules.py
|
KIZI/actionrules
|
227e021fa60ce40a1492322fe9bec35f0469e19c
|
[
"MIT"
] | 7
|
2019-10-10T15:51:36.000Z
|
2022-03-23T00:33:30.000Z
|
import unittest
import pandas as pd
from actionrules.actionRules import ActionRules
from actionrules.desiredState import DesiredState
class TestActionRules(unittest.TestCase):
def setUp(self):
self.actionRulesDiscoveryEmptyNotNan = ActionRules([pd.DataFrame()],
[pd.DataFrame()],
[pd.DataFrame()],
DesiredState(),
[pd.Series()],
[pd.Series()])
self.actionRulesDiscoveryEmptyNan = ActionRules([pd.DataFrame()],
[pd.DataFrame()],
[pd.DataFrame()],
DesiredState(),
[pd.Series()],
[pd.Series()],
True)
def test_is_action_couple_when_stable_candidate_not_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '0', "stable")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0',), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_stable_candidate_not_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '1', "stable")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_stable_candidate_not_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('nan', '1', "stable")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_flexible_candidate_not_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '0', "flexible")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_flexible_candidate_not_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '1', "flexible")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0', '1'), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_flexible_candidate_not_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('nan', '1', "flexible")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_stable_candidate_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '0', "stable")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0',), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_stable_candidate_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '1', "stable")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_stable_candidate_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('nan', '1', "stable")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('1*',), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_flexible_candidate_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '0', "flexible")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_flexible_candidate_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '1', "flexible")
#(bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0', '1'), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_flexible_candidate_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('nan', '1', "flexible")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('None', '1'), False)
self.assertEqual(expected, result)
def test_get_uplift(self):
result = self.actionRulesDiscoveryEmptyNan._get_uplift(0.2, 0.8, 0.8)
expected = 0.15
self.assertAlmostEqual(expected, result)
if __name__ == '__main__':
unittest.main()
| 52.708738
| 95
| 0.634371
| 575
| 5,429
| 5.613913
| 0.097391
| 0.089219
| 0.104089
| 0.055762
| 0.883829
| 0.883829
| 0.883829
| 0.883829
| 0.861214
| 0.861214
| 0
| 0.009282
| 0.265795
| 5,429
| 102
| 96
| 53.22549
| 0.800552
| 0.14883
| 0
| 0.424658
| 0
| 0
| 0.029724
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 1
| 0.191781
| false
| 0
| 0.054795
| 0
| 0.260274
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
acedd7a0944d8784bf933d66fe77b0849856dd14
| 1,083
|
py
|
Python
|
gQuant/plugins/cusignal_plugin/setup.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/cusignal_plugin/setup.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/cusignal_plugin/setup.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
'''
Greenflow Cusignal Plugin
'''
from setuptools import setup, find_packages
setup(
name='greenflow_cusignal_plugin',
version='1.0',
description='greenflow cusignal plugin - RAPIDS Cusignal Nodes for Greenflow', # noqa: E501
install_requires=["greenflow", "cusignal"],
packages=find_packages(include=['greenflow_cusignal_plugin',
'greenflow_cusignal_plugin.*']),
entry_points={
'greenflow.plugin': [
'greenflow_cusignal_plugin = greenflow_cusignal_plugin',
'greenflow_cusignal_plugin.convolution = greenflow_cusignal_plugin.convolution', # noqa: E501
'greenflow_cusignal_plugin.filtering = greenflow_cusignal_plugin.filtering', # noqa: E501
'greenflow_cusignal_plugin.gensig = greenflow_cusignal_plugin.gensig', # noqa: E501
'greenflow_cusignal_plugin.spectral_analysis = greenflow_cusignal_plugin.spectral_analysis', # noqa: E501
'greenflow_cusignal_plugin.windows = greenflow_cusignal_plugin.windows' # noqa: E501
],
}
)
| 45.125
| 118
| 0.697138
| 104
| 1,083
| 6.913462
| 0.288462
| 0.425591
| 0.543811
| 0.161335
| 0.417246
| 0.168289
| 0.104312
| 0
| 0
| 0
| 0
| 0.023364
| 0.209603
| 1,083
| 23
| 119
| 47.086957
| 0.816589
| 0.084949
| 0
| 0
| 0
| 0
| 0.617587
| 0.497955
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
acf5088785c6f06655057edb0c495d78fce0fca9
| 33
|
py
|
Python
|
tests/__init__.py
|
lakshayarora476/TSIClient
|
8d911d8beac3259d0fe86446e6526c1c8d53b74f
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
lakshayarora476/TSIClient
|
8d911d8beac3259d0fe86446e6526c1c8d53b74f
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
lakshayarora476/TSIClient
|
8d911d8beac3259d0fe86446e6526c1c8d53b74f
|
[
"MIT"
] | null | null | null |
from TSIClient import TSIClient
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 33
| 1
| 33
| 33
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a16fce57e525d7542a7b15a19451f6a8ffc1df0
| 304
|
py
|
Python
|
tests/data/project1/file1.py
|
Polyconseil/check-oldies
|
0d0d9632281d14f652e71ac2c0db3b0cbf9b089c
|
[
"BSD-3-Clause"
] | 4
|
2020-10-27T16:18:57.000Z
|
2020-12-01T10:58:19.000Z
|
tests/data/project1/file1.py
|
Polyconseil/check-oldies
|
0d0d9632281d14f652e71ac2c0db3b0cbf9b089c
|
[
"BSD-3-Clause"
] | 1
|
2020-11-18T14:04:10.000Z
|
2020-11-18T15:29:44.000Z
|
tests/data/project1/file1.py
|
Polyconseil/check-oldies
|
0d0d9632281d14f652e71ac2c0db3b0cbf9b089c
|
[
"BSD-3-Clause"
] | null | null | null |
# TIMEBOMB: report me
a = 1 # TIMEBOMB (jsmith): report me
# TIMEBOMB: do not report me (pragma). # no-check-fixmes
# TIMEBOMB(jsmith - 2020-04-25): report me
a = "TIMEBOMB" # do not report me (within a string)
a = "TIMEBOMB" # do not report me (within a string)
# TIMEBOMB - FEWTURE-BOOM: report me
| 33.777778
| 56
| 0.680921
| 48
| 304
| 4.3125
| 0.395833
| 0.270531
| 0.188406
| 0.275362
| 0.439614
| 0.338164
| 0.338164
| 0.338164
| 0.338164
| 0
| 0
| 0.036735
| 0.194079
| 304
| 8
| 57
| 38
| 0.808163
| 0.815789
| 0
| 0.666667
| 0
| 0
| 0.340426
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a1833f2e874f31cd4232fbe5bda65f63de3b0ac
| 43
|
py
|
Python
|
src/audisto_exporter/__init__.py
|
ZeitOnline/audisto_exporter
|
9d1b1771c9ec38f0c512f4736b97fd7f3432e904
|
[
"BSD-3-Clause"
] | null | null | null |
src/audisto_exporter/__init__.py
|
ZeitOnline/audisto_exporter
|
9d1b1771c9ec38f0c512f4736b97fd7f3432e904
|
[
"BSD-3-Clause"
] | 1
|
2021-06-24T11:32:59.000Z
|
2021-06-24T11:32:59.000Z
|
src/audisto_exporter/__init__.py
|
ZeitOnline/audisto_exporter
|
9d1b1771c9ec38f0c512f4736b97fd7f3432e904
|
[
"BSD-3-Clause"
] | null | null | null |
from audisto_exporter.exporter import main
| 21.5
| 42
| 0.883721
| 6
| 43
| 6.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a254d47f70fe89c5643e5985dd52d55f47dfd33
| 42
|
py
|
Python
|
hpvm/projects/predtuner/predtuner/approxes/__init__.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
hpvm/projects/predtuner/predtuner/approxes/__init__.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
hpvm/projects/predtuner/predtuner/approxes/__init__.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
from .approxes import get_knobs_from_file
| 21
| 41
| 0.880952
| 7
| 42
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c59362cab4594afa05e3f2674aeb5ede17f7bfc4
| 25
|
py
|
Python
|
recport/__init__.py
|
CircleOnCircles/recport
|
371f8af612f7a0787eab9267ffe65f372c7badb2
|
[
"MIT"
] | null | null | null |
recport/__init__.py
|
CircleOnCircles/recport
|
371f8af612f7a0787eab9267ffe65f372c7badb2
|
[
"MIT"
] | null | null | null |
recport/__init__.py
|
CircleOnCircles/recport
|
371f8af612f7a0787eab9267ffe65f372c7badb2
|
[
"MIT"
] | 1
|
2020-02-03T13:52:22.000Z
|
2020-02-03T13:52:22.000Z
|
from .portfolio import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
681919366bd3448b7c6281a35f618ace8e1ef4be
| 2,462
|
py
|
Python
|
tests/system/action/meeting/test_replace_projector_id.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
tests/system/action/meeting/test_replace_projector_id.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | 19
|
2021-11-22T16:25:54.000Z
|
2021-11-25T13:38:13.000Z
|
tests/system/action/meeting/test_replace_projector_id.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
from tests.system.action.base import BaseActionTestCase
class MeetingReplaceProjectorIdTest(BaseActionTestCase):
def setUp(self) -> None:
super().setUp()
self.set_models(
{
"meeting/1": {
"default_projector_$_id": ["motion"],
"default_projector_$motion_id": 11,
"reference_projector_id": 20,
"is_active_in_organization_id": 1,
},
"projector/11": {
"used_as_default_$motion_in_meeting_id": 1,
"used_as_default_$_in_meeting_id": ["motion"],
},
"projector/20": {
"used_as_reference_projector_meeting_id": 1,
},
}
)
def test_replacing(self) -> None:
response = self.request(
"meeting.replace_projector_id", {"id": 1, "projector_id": 11}
)
self.assert_status_code(response, 200)
meeting = self.get_model("meeting/1")
assert meeting.get("default_projector_$_id") == ["motion"]
assert meeting.get("default_projector_$motion_id") == 20
assert meeting.get("reference_projector_id") == 20
projector_11 = self.get_model("projector/11")
assert projector_11.get("used_as_default_$motion_in_meeting_id") is None
projector_20 = self.get_model("projector/20")
assert projector_20.get("used_as_reference_projector_meeting_id") == 1
assert projector_20.get("used_as_default_$motion_in_meeting_id") == 1
assert projector_20.get("used_as_default_$_in_meeting_id") == ["motion"]
def test_no_replacing(self) -> None:
response = self.request(
"meeting.replace_projector_id", {"id": 1, "projector_id": 12}
)
self.assert_status_code(response, 200)
meeting = self.get_model("meeting/1")
assert meeting.get("default_projector_$_id") == ["motion"]
assert meeting.get("default_projector_$motion_id") == 11
assert meeting.get("reference_projector_id") == 20
projector_11 = self.get_model("projector/11")
assert projector_11.get("used_as_default_$motion_in_meeting_id") == 1
assert projector_11.get("used_as_default_$_in_meeting_id") == ["motion"]
projector_20 = self.get_model("projector/20")
assert projector_20.get("used_as_reference_projector_meeting_id") == 1
| 41.728814
| 80
| 0.611698
| 278
| 2,462
| 5
| 0.158273
| 0.079137
| 0.065468
| 0.057554
| 0.814388
| 0.794245
| 0.794245
| 0.768345
| 0.731655
| 0.680576
| 0
| 0.037862
| 0.270512
| 2,462
| 58
| 81
| 42.448276
| 0.73608
| 0
| 0
| 0.32
| 0
| 0
| 0.33225
| 0.266044
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.06
| false
| 0
| 0.02
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a85ace768f4d863824637747325b058794211789
| 2,882
|
py
|
Python
|
karp/tests/integration/test_sql_uow.py
|
spraakbanken/karp-backend-v6-tmp
|
e5b78157bd999df18c188973ae2a337015b6f35d
|
[
"MIT"
] | 1
|
2021-12-08T15:33:42.000Z
|
2021-12-08T15:33:42.000Z
|
karp/tests/integration/test_sql_uow.py
|
spraakbanken/karp-backend-v6-tmp
|
e5b78157bd999df18c188973ae2a337015b6f35d
|
[
"MIT"
] | null | null | null |
karp/tests/integration/test_sql_uow.py
|
spraakbanken/karp-backend-v6-tmp
|
e5b78157bd999df18c188973ae2a337015b6f35d
|
[
"MIT"
] | null | null | null |
import pytest
from karp.domain import model
from karp.infrastructure.sql import sql_unit_of_work
from karp.utility import unique_id
def random_resource() -> model.Resource:
return model.Resource(
entity_id=unique_id.make_unique_id(),
resource_id="abc",
name="ABC",
config={"fields": {}},
message="added",
)
def random_entry(resource_id: str = None) -> model.Entry:
return model.Entry(
entity_id=unique_id.make_unique_id(),
entry_id="abc..1",
resource_id=resource_id or "abc",
body={"id": "abc..1"},
message="added",
)
class TestSqlResourceUnitOfWork:
def test_rolls_back_uncommitted_work_by_default(self, sqlite_session_factory):
uow = sql_unit_of_work.SqlResourceUnitOfWork(sqlite_session_factory)
with uow:
resource = random_resource()
uow.resources.put(resource)
new_session = sqlite_session_factory()
rows = list(new_session.execute('SELECT * FROM "resources"'))
assert rows == []
def test_rolls_back_on_error(self, sqlite_session_factory):
class MyException(Exception):
pass
uow = sql_unit_of_work.SqlResourceUnitOfWork(sqlite_session_factory)
with pytest.raises(MyException):
with uow:
resource = random_resource()
uow.resources.put(resource)
raise MyException()
new_session = sqlite_session_factory()
rows = list(new_session.execute('SELECT * FROM "resources"'))
assert rows == []
class TestSqlEntryUnitOfWork:
def test_rolls_back_uncommitted_work_by_default(self, sqlite_session_factory):
uow = sql_unit_of_work.SqlEntryUnitOfWork(
{"resource_id": "abc", "table_name": "abc"},
resource_config={"resource_id": "abc", "config": {}},
session_factory=sqlite_session_factory,
)
with uow:
entry = random_entry(resource_id="abc")
uow.entries.put(entry)
new_session = sqlite_session_factory()
rows = list(new_session.execute('SELECT * FROM "resources"'))
assert rows == []
def test_rolls_back_on_error(self, sqlite_session_factory):
class MyException(Exception):
pass
uow = sql_unit_of_work.SqlEntryUnitOfWork(
{"resource_id": "abc", "table_name": "abc"},
resource_config={"resource_id": "abc", "config": {}},
session_factory=sqlite_session_factory,
)
with pytest.raises(MyException):
with uow:
entry = random_entry(resource_id="abc")
uow.entries.put(entry)
raise MyException()
new_session = sqlite_session_factory()
rows = list(new_session.execute('SELECT * FROM "resources"'))
assert rows == []
| 32.75
| 82
| 0.628036
| 315
| 2,882
| 5.431746
| 0.209524
| 0.114553
| 0.140269
| 0.037989
| 0.78083
| 0.78083
| 0.78083
| 0.748101
| 0.748101
| 0.66277
| 0
| 0.000946
| 0.266482
| 2,882
| 87
| 83
| 33.126437
| 0.80842
| 0
| 0
| 0.714286
| 0
| 0
| 0.082929
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 1
| 0.085714
| false
| 0.028571
| 0.057143
| 0.028571
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a86c86de616a09799680ced1a9ed9952c4d86b29
| 38
|
py
|
Python
|
src/automagic_imaging/scripts/__init__.py
|
univ-of-utah-marriott-library-apple/radmind_auto_image_creator
|
84ede339c7e060068bba91d627d10d7d15fc743e
|
[
"MIT"
] | 2
|
2015-06-25T05:33:23.000Z
|
2018-03-04T06:11:54.000Z
|
src/automagic_imaging/scripts/__init__.py
|
univ-of-utah-marriott-library-apple/radmind_auto_image_creator
|
84ede339c7e060068bba91d627d10d7d15fc743e
|
[
"MIT"
] | null | null | null |
src/automagic_imaging/scripts/__init__.py
|
univ-of-utah-marriott-library-apple/radmind_auto_image_creator
|
84ede339c7e060068bba91d627d10d7d15fc743e
|
[
"MIT"
] | null | null | null |
import logger, parse_options, radmind
| 19
| 37
| 0.842105
| 5
| 38
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a8b2d188a08bbe1de71b93b1c3c0339bf1b59b6a
| 29
|
py
|
Python
|
plugins/pelican_alias/__init__.py
|
fferegrino/yet-another-blog-migration
|
1e7e95768af0d86d0a890b4582ef70c44b995e8e
|
[
"Apache-2.0"
] | null | null | null |
plugins/pelican_alias/__init__.py
|
fferegrino/yet-another-blog-migration
|
1e7e95768af0d86d0a890b4582ef70c44b995e8e
|
[
"Apache-2.0"
] | 36
|
2019-04-30T22:01:52.000Z
|
2019-08-15T18:01:36.000Z
|
plugins/pelican_alias/__init__.py
|
fferegrino/yet-another-blog-migration
|
1e7e95768af0d86d0a890b4582ef70c44b995e8e
|
[
"Apache-2.0"
] | null | null | null |
from .pelican_alias import *
| 14.5
| 28
| 0.793103
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a8d3ffefb6a36b0e99d1e3b83f06a466e6e4b38a
| 535
|
py
|
Python
|
tests/response/test_generate_link_response.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | 1
|
2021-02-09T14:44:55.000Z
|
2021-02-09T14:44:55.000Z
|
tests/response/test_generate_link_response.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | null | null | null |
tests/response/test_generate_link_response.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | 1
|
2022-01-26T18:09:33.000Z
|
2022-01-26T18:09:33.000Z
|
DOCS_EXAMPLE_GENERATE_LINK_RESPONSE = {
"link": "https://connect.lakey_finicity.com?analytics=google%3AUA-123456789-1&consumerId=1cb21a38d006a384cf0376d12f2ddfef&customerId=12345678&partnerId=12345678921234&redirectUri=https%3A%2F%2Fwww.lakey_finicity.com&signature=b5fd21d087c06d0a7442b6fa85e7added988b057ab11717b7e37f650878b2dd2×tamp=1564070902134&type=voa&webhook=https%3A%2F%2Facme-lending.com/webhook&webhookContentType=application%2Fjson&webhookData=%7B%22value1%22%3A%22123456789%22%2C%22value2%22%3A%2201Mar19%22%7D"
}
| 133.75
| 492
| 0.865421
| 61
| 535
| 7.491803
| 0.754098
| 0.056893
| 0.070022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277567
| 0.016822
| 535
| 3
| 493
| 178.333333
| 0.591255
| 0
| 0
| 0
| 1
| 0.333333
| 0.902804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8d551833bc2154a534b7f5f037dac6d48c89659
| 603
|
py
|
Python
|
cancer-immune/EMEWS-scripts/python/stats.py
|
rheiland/PhysiCell-EMEWS-2
|
ec6ae7dab314b839f46a152ce9f5905155012d48
|
[
"BSD-3-Clause"
] | null | null | null |
cancer-immune/EMEWS-scripts/python/stats.py
|
rheiland/PhysiCell-EMEWS-2
|
ec6ae7dab314b839f46a152ce9f5905155012d48
|
[
"BSD-3-Clause"
] | null | null | null |
cancer-immune/EMEWS-scripts/python/stats.py
|
rheiland/PhysiCell-EMEWS-2
|
ec6ae7dab314b839f46a152ce9f5905155012d48
|
[
"BSD-3-Clause"
] | 2
|
2019-05-24T02:42:11.000Z
|
2021-07-12T12:19:46.000Z
|
import statistics
import builtins
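# Values equal to 9999999999 are treated as missing and excluded from every statistic below.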
def min(vals):
fl = [v for x in vals for v in x if v != 9999999999]
if len(fl) == 0:
return -1
return builtins.min(fl)
def max(vals):
fl = [v for x in vals for v in x if v != 9999999999]
if len(fl) == 0:
return -1
return builtins.max(fl)
def mean(vals):
fl = [v for x in vals for v in x if v != 9999999999]
if len(fl) == 0:
return -1
return statistics.mean(fl)
def std(vals):
fl = [v for x in vals for v in x if v != 9999999999]
if len(fl) == 0:
return -1
return statistics.pstdev(fl)
| 22.333333
| 56
| 0.575456
| 108
| 603
| 3.212963
| 0.194444
| 0.069164
| 0.080692
| 0.115274
| 0.772334
| 0.772334
| 0.772334
| 0.772334
| 0.772334
| 0.772334
| 0
| 0.115663
| 0.311774
| 603
| 26
| 57
| 23.192308
| 0.720482
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
763e28bdaae93c07326b7b64c69db34d0ba6e1fc
| 6,336
|
py
|
Python
|
starry/_core/ops/integration.py
|
shashankdholakia/starry
|
5619cc9823651a69f1230ead8fc87eb75a9d682e
|
[
"MIT"
] | null | null | null |
starry/_core/ops/integration.py
|
shashankdholakia/starry
|
5619cc9823651a69f1230ead8fc87eb75a9d682e
|
[
"MIT"
] | null | null | null |
starry/_core/ops/integration.py
|
shashankdholakia/starry
|
5619cc9823651a69f1230ead8fc87eb75a9d682e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ...compat import Apply, Op, tt
import numpy as np
__all__ = ["sTOp", "rTReflectedOp", "sTReflectedOp"]
class sTOp(Op):
def __init__(self, func, N):
self.func = func
self.N = N
self._grad_op = sTGradientOp(self)
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [tt.TensorType(inputs[-1].dtype, (False, False))()]
return Apply(self, inputs, outputs)
def infer_shape(self, *args):
shapes = args[-1]
return [shapes[0] + (tt.as_tensor(self.N),)]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
def perform(self, node, inputs, outputs):
outputs[0][0] = self.func(*inputs)
def grad(self, inputs, gradients):
return self._grad_op(*(inputs + gradients))
class sTGradientOp(Op):
def __init__(self, base_op):
self.base_op = base_op
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [i.type() for i in inputs[:-1]]
return Apply(self, inputs, outputs)
def infer_shape(self, *args):
shapes = args[-1]
return shapes[:-1]
def perform(self, node, inputs, outputs):
bb, br = self.base_op.func(*inputs)
outputs[0][0] = np.reshape(bb, np.shape(inputs[0]))
outputs[1][0] = np.reshape(br, np.shape(inputs[1]))
class rTReflectedOp(Op):
def __init__(self, func, N):
self.func = func
self.N = N
self._grad_op = rTReflectedGradientOp(self)
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [
tt.TensorType(inputs[-1].dtype, (False, False))(),
tt.TensorType(inputs[-1].dtype, (False, False))(),
tt.TensorType(inputs[-1].dtype, (False, False))(),
]
return Apply(self, inputs, outputs)
def infer_shape(self, *args):
shapes = args[-1]
return [
shapes[0] + (tt.as_tensor(self.N),),
shapes[0] + (tt.as_tensor(self.N),),
shapes[0] + (tt.as_tensor(self.N),),
]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
def perform(self, node, inputs, outputs):
(b, sigr) = inputs
rT, ddb, ddsigr = self.func(b, sigr)
outputs[0][0] = rT
outputs[1][0] = ddb
outputs[2][0] = ddsigr
def grad(self, inputs, gradients):
results = self(*inputs)
grad = self._grad_op(*(inputs + results + [gradients[0]]))
return grad
class rTReflectedGradientOp(Op):
def __init__(self, base_op):
self.base_op = base_op
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [i.type() for i in inputs[:2]]
return Apply(self, inputs, outputs)
def infer_shape(self, *args):
shapes = args[-1]
return shapes[:2]
def perform(self, node, inputs, outputs):
b, sigr, rT, ddb, ddsigr, brT = inputs
bb = (brT * ddb).sum(-1)
bsigr = (brT * ddsigr).sum()
outputs[0][0] = np.reshape(bb, np.shape(b))
outputs[1][0] = np.array(np.reshape(bsigr, np.shape(sigr)))
class sTReflectedOp(Op):
def __init__(self, func, N):
self.func = func
self.N = N
self._grad_op = sTReflectedGradientOp(self)
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [
tt.TensorType(inputs[-1].dtype, (False, False))(),
tt.TensorType(inputs[-1].dtype, (False, False))(),
tt.TensorType(inputs[-1].dtype, (False, False))(),
tt.TensorType(inputs[-1].dtype, (False, False))(),
tt.TensorType(inputs[-1].dtype, (False, False))(),
tt.TensorType(inputs[-1].dtype, (False, False))(),
]
return Apply(self, inputs, outputs)
def infer_shape(self, *args):
shapes = args[-1]
return [
shapes[0] + (tt.as_tensor(self.N),),
shapes[0] + (tt.as_tensor(self.N),),
shapes[0] + (tt.as_tensor(self.N),),
shapes[0] + (tt.as_tensor(self.N),),
shapes[0] + (tt.as_tensor(self.N),),
shapes[0] + (tt.as_tensor(self.N),),
]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
def perform(self, node, inputs, outputs):
b, theta, bo, ro, sigr = inputs
sT, ddb, ddtheta, ddbo, ddro, ddsigr = self.func(
b, theta, bo, ro, sigr
)
outputs[0][0] = sT
outputs[1][0] = ddb
outputs[2][0] = ddtheta
outputs[3][0] = ddbo
outputs[4][0] = ddro
outputs[5][0] = ddsigr
def grad(self, inputs, gradients):
results = self(*inputs)
grad = self._grad_op(*(inputs + results + [gradients[0]]))
return grad
class sTReflectedGradientOp(Op):
def __init__(self, base_op):
self.base_op = base_op
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [i.type() for i in inputs[:5]]
return Apply(self, inputs, outputs)
def infer_shape(self, *args):
shapes = args[-1]
return shapes[:5]
def perform(self, node, inputs, outputs):
(
b,
theta,
bo,
ro,
sigr,
sT,
ddb,
ddtheta,
ddbo,
ddro,
ddsigr,
bsT,
) = inputs
bb = (bsT * ddb).sum(-1)
btheta = (bsT * ddtheta).sum(-1)
bbo = (bsT * ddbo).sum(-1)
bro = (bsT * ddro).sum()
bsigr = (bsT * ddsigr).sum()
outputs[0][0] = np.reshape(bb, np.shape(b))
outputs[1][0] = np.reshape(btheta, np.shape(theta))
outputs[2][0] = np.reshape(bbo, np.shape(bo))
outputs[3][0] = np.array(np.reshape(bro, np.shape(ro)))
outputs[4][0] = np.array(np.reshape(bsigr, np.shape(sigr)))
| 30.608696
| 69
| 0.548927
| 828
| 6,336
| 4.080918
| 0.100242
| 0.059189
| 0.047351
| 0.05623
| 0.808523
| 0.784256
| 0.759692
| 0.747558
| 0.72773
| 0.709086
| 0
| 0.018485
| 0.299874
| 6,336
| 206
| 70
| 30.757282
| 0.743237
| 0.003314
| 0
| 0.586826
| 0
| 0
| 0.004752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179641
| false
| 0
| 0.011976
| 0.005988
| 0.353293
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
767a3ccb69f855fe3fdb99e127c48479410a52ae
| 399
|
py
|
Python
|
tests/test_models.py
|
softformance/django-facebook-photo-api
|
0750140f322a195d69e7fb64c8792efe3f75f073
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
softformance/django-facebook-photo-api
|
0750140f322a195d69e7fb64c8792efe3f75f073
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
softformance/django-facebook-photo-api
|
0750140f322a195d69e7fb64c8792efe3f75f073
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_Django-Facebook-photo-api
------------
Tests for `Django-Facebook-photo-api` models module.
"""
from django.test import TestCase
from django_facebook_photo_api import models
class TestDjango_facebook_photo_api(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
| 15.346154
| 52
| 0.66416
| 51
| 399
| 5.039216
| 0.529412
| 0.202335
| 0.249027
| 0.256809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003135
| 0.200501
| 399
| 25
| 53
| 15.96
| 0.802508
| 0.350877
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.222222
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
767c925602ef009b0d9a443b436a4c5d856ed7ef
| 11,860
|
py
|
Python
|
plgx-esp/tests/test_functional/test_logging.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | null | null | null |
plgx-esp/tests/test_functional/test_logging.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | null | null | null |
plgx-esp/tests/test_functional/test_logging.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | 2
|
2021-11-12T10:25:02.000Z
|
2022-03-30T06:33:52.000Z
|
import datetime as dt
import gzip
import io
import json
from flask import url_for
from polylogyx.db.models import ResultLog
import time
class TestLogging:
def test_bad_post_request(self, node, testapp):
resp = testapp.post(url_for("api.logger"), {"foo": "bar"}, expect_errors=True)
assert not resp.normal_body
def test_missing_node_key(self, node, testapp):
resp = testapp.post_json(
url_for("api.logger"), {"foo": "bar"}, expect_errors=True
)
assert not resp.normal_body
# assert resp.json == {'node_invalid': True}
def test_status_log_created_for_node(self, node, testapp):
data = {
"line": 1,
"message": "This is a test of the emergency broadcast system.",
"severity": 1,
"filename": "foobar.cpp",
}
assert not node.status_logs.count()
resp = testapp.post_json(
url_for("api.logger"),
{
"node_key": node.node_key,
"data": [data],
"log_type": "status",
},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
assert node.status_logs.count()
assert node.status_logs[0].line == data["line"]
assert node.status_logs[0].message == data["message"]
assert node.status_logs[0].severity == data["severity"]
assert node.status_logs[0].filename == data["filename"]
assert node.last_ip == "127.0.0.2"
def test_status_log_created_for_node_put(self, node, testapp):
data = {
"line": 1,
"message": "This is a test of the emergency broadcast system.",
"severity": 1,
"filename": "foobar.cpp",
}
assert not node.status_logs.count()
resp = testapp.put_json(
url_for("api.logger"),
{
"node_key": node.node_key,
"data": [data],
"log_type": "status",
},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
assert node.status_logs.count()
assert node.status_logs[0].line == data["line"]
assert node.status_logs[0].message == data["message"]
assert node.status_logs[0].severity == data["severity"]
assert node.status_logs[0].filename == data["filename"]
assert node.last_ip == "127.0.0.2"
def test_status_log_created_for_node_when_gzipped(self, node, testapp):
data = {
"line": 1,
"message": "This is a test of the emergency broadcast system.",
"severity": 1,
"filename": "foobar.cpp",
}
assert not node.status_logs.count()
fileobj = io.BytesIO()
gzf = gzip.GzipFile(fileobj=fileobj, mode="wb")
gzf.write(
json.dumps(
{
"node_key": node.node_key,
"data": [data],
"log_type": "status",
}
).encode("utf-8")
)
gzf.close()
resp = testapp.post(
url_for("api.logger"),
fileobj.getvalue(),
headers={"Content-Encoding": "gzip", "Content-Type": "application/json"},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
assert node.status_logs.count()
assert node.status_logs[0].line == data["line"]
assert node.status_logs[0].message == data["message"]
assert node.status_logs[0].severity == data["severity"]
assert node.status_logs[0].filename == data["filename"]
assert node.last_ip == "127.0.0.2"
def test_no_status_log_created_when_data_is_empty(self, node, testapp,celery_worker):
assert not node.status_logs.count()
resp = testapp.post_json(
url_for("api.logger"),
{
"node_key": node.node_key,
"data": [],
"log_type": "status",
},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
assert not node.status_logs.count()
assert node.last_ip == "127.0.0.2"
def test_result_log_created_for_node(self,testapp,db,node,celery_worker):
now = dt.datetime.utcnow()
data = [
{
"diffResults": {
"added": [
{
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97830",
}
],
"removed": [
{
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97650",
}
],
},
"name": "processes",
"hostIdentifier": "hostname.local",
"calendarTime": "%s %s" % (now.ctime(), "UTC"),
"unixTime": now.strftime("%s"),
}
]
assert not node.result_logs.count()
resp = testapp.post_json(
url_for("api.logger"),
{
"node_key": node.node_key,
"data": data,
"log_type": "result",
},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
time.sleep(5)
assert node.result_logs.count() == 2
assert node.last_ip == "127.0.0.2"
added = ResultLog.query.filter(ResultLog.node==node).filter(ResultLog.action=="added").first()
removed = ResultLog.query.filter(ResultLog.node==node).filter(ResultLog.action=="removed").first()
assert added.name == data[0]["name"]
assert added.columns == data[0]["diffResults"]["added"][0]
assert removed.name == data[0]["name"]
assert removed.columns == data[0]["diffResults"]["removed"][0]
def test_no_result_log_created_when_data_is_empty(self, node, testapp):
assert not node.result_logs.count()
resp = testapp.post_json(
url_for("api.logger"),
{
"node_key": node.node_key,
"data": [],
"log_type": "result",
},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
assert not node.result_logs.count()
# assert node.last_ip == "127.0.0.2"
def test_result_event_format(self,testapp,db,node,celery_worker):
now = dt.datetime.utcnow()
calendarTime = "%s %s" % (now.ctime(), "UTC")
unixTime = now.strftime("%s")
data = [
{
"action": "added",
"columns": {
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97830",
},
"name": "osquery",
"hostIdentifier": "hostname.local",
"calendarTime": calendarTime,
"unixTime": unixTime,
},
{
"action": "removed",
"columns": {
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97830",
},
"name": "osquery",
"hostIdentifier": "hostname.local",
"calendarTime": calendarTime,
"unixTime": unixTime,
},
{
"action": "added",
"columns": {
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97830",
},
"name": "processes",
"hostIdentifier": "hostname.local",
"calendarTime": calendarTime,
"unixTime": unixTime,
},
{
"action": "removed",
"columns": {
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97830",
},
"name": "processes",
"hostIdentifier": "hostname.local",
"calendarTime": calendarTime,
"unixTime": unixTime,
},
]
assert not node.result_logs.count()
resp = testapp.post_json(
url_for("api.logger"),
{
"node_key": node.node_key,
"data": data,
"log_type": "result",
},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
time.sleep(5)
assert node.result_logs.count() == 4
assert node.last_ip == "127.0.0.2"
added = ResultLog.query.filter(ResultLog.node==node).filter(ResultLog.action=="added").count()
removed = ResultLog.query.filter(ResultLog.node==node).filter(ResultLog.action=="removed").count()
assert added == 2
assert removed == 2
def test_heterogeneous_result_format(self,testapp,db,node,celery_worker):
now = dt.datetime.utcnow()
calendarTime = "%s %s" % (now.ctime(), "UTC")
unixTime = now.strftime("%s")
data = [
{
"action": "removed",
"columns": {
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97830",
},
"name": "processes",
"hostIdentifier": "hostname.local",
"calendarTime": calendarTime,
"unixTime": unixTime,
},
{
"diffResults": {
"added": [
{
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97830",
}
],
"removed": [
{
"name": "osqueryd",
"path": "/usr/local/bin/osqueryd",
"pid": "97650",
}
],
},
"name": "processes",
"hostIdentifier": "hostname.local",
"calendarTime": calendarTime,
"unixTime": unixTime,
},
{
"calendarTime": calendarTime,
"unixTime": unixTime,
"action": "snapshot",
"snapshot": [
{"parent": "0", "path": "/sbin/launchd", "pid": "1"},
{"parent": "1", "path": "/usr/sbin/syslogd", "pid": "51"},
{"parent": "1", "path": "/usr/libexec/UserEventAgent", "pid": "52"},
{"parent": "1", "path": "/usr/libexec/kextd", "pid": "54"},
],
"name": "process_snapshot",
"name": "file_events",
"hostIdentifier": "hostname.local",
},
]
assert not node.result_logs.count()
resp = testapp.post_json(
url_for("api.logger"),
{
"node_key": node.node_key,
"data": data,
"log_type": "result",
},
extra_environ=dict(REMOTE_ADDR="127.0.0.2"),
expect_errors=True,
)
time.sleep(10)
assert node.result_logs.count() == 7
assert node.last_ip == "127.0.0.2"
| 33.314607
| 106
| 0.455902
| 1,095
| 11,860
| 4.787215
| 0.136073
| 0.049599
| 0.053415
| 0.018314
| 0.857306
| 0.825258
| 0.817245
| 0.802938
| 0.795116
| 0.774514
| 0
| 0.026387
| 0.405649
| 11,860
| 356
| 107
| 33.314607
| 0.717265
| 0.006492
| 0
| 0.622581
| 0
| 0
| 0.184704
| 0.019862
| 0
| 0
| 0
| 0
| 0.13871
| 1
| 0.032258
| false
| 0
| 0.022581
| 0
| 0.058065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7696a576f92f3e242bcfaf5fa51c1343f1aaa4d7
| 231
|
py
|
Python
|
doac/exceptions/invalid_client.py
|
EE/doac
|
ffc26fe8222e61cadbaa138b7e36d749de663e68
|
[
"MIT"
] | 19
|
2015-01-02T12:16:59.000Z
|
2018-10-10T14:56:03.000Z
|
doac/exceptions/invalid_client.py
|
EE/doac
|
ffc26fe8222e61cadbaa138b7e36d749de663e68
|
[
"MIT"
] | 2
|
2015-05-28T17:29:34.000Z
|
2016-05-24T15:50:30.000Z
|
doac/exceptions/invalid_client.py
|
EE/doac
|
ffc26fe8222e61cadbaa138b7e36d749de663e68
|
[
"MIT"
] | 10
|
2015-03-03T10:37:44.000Z
|
2018-10-10T14:56:10.000Z
|
from .base import InvalidClient
class ClientDoesNotExist(InvalidClient):
reason = "The client was malformed or invalid."
class ClientSecretNotValid(InvalidClient):
reason = "The client secret was malformed or invalid."
| 23.1
| 58
| 0.774892
| 25
| 231
| 7.16
| 0.6
| 0.212291
| 0.24581
| 0.312849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160173
| 231
| 9
| 59
| 25.666667
| 0.92268
| 0
| 0
| 0
| 0
| 0
| 0.341991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
76a91d830647a56c5c9c2cc5585274603655152f
| 187
|
py
|
Python
|
tests/conftest.py
|
arne-cl/discoursegraphs
|
4e14688e19c980ac9bbac75ff1bf5d751ef44ac3
|
[
"BSD-3-Clause"
] | 41
|
2015-02-20T00:35:39.000Z
|
2022-03-15T13:54:13.000Z
|
tests/conftest.py
|
arne-cl/discoursegraphs
|
4e14688e19c980ac9bbac75ff1bf5d751ef44ac3
|
[
"BSD-3-Clause"
] | 68
|
2015-01-09T18:07:38.000Z
|
2021-10-06T16:30:43.000Z
|
tests/conftest.py
|
arne-cl/discoursegraphs
|
4e14688e19c980ac9bbac75ff1bf5d751ef44ac3
|
[
"BSD-3-Clause"
] | 8
|
2015-02-20T00:35:48.000Z
|
2021-10-30T14:09:03.000Z
|
from discoursegraphs.corpora import pcc
def pytest_namespace():
"""these objects/variables are available to all tests in the test suite"""
return {'maz_1423': pcc['maz-1423']}
| 23.375
| 78
| 0.721925
| 26
| 187
| 5.115385
| 0.884615
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 0.165775
| 187
| 7
| 79
| 26.714286
| 0.801282
| 0.363636
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f4cf462380fbc20db6f843ec67d6921e87dff07
| 203
|
py
|
Python
|
mksc/feature/values/__init__.py
|
HelloCoyen/mksc2
|
ede038b87b7a46c2872ac9ae744c4dbfe5d6fe48
|
[
"MIT"
] | null | null | null |
mksc/feature/values/__init__.py
|
HelloCoyen/mksc2
|
ede038b87b7a46c2872ac9ae744c4dbfe5d6fe48
|
[
"MIT"
] | null | null | null |
mksc/feature/values/__init__.py
|
HelloCoyen/mksc2
|
ede038b87b7a46c2872ac9ae744c4dbfe5d6fe48
|
[
"MIT"
] | null | null | null |
from .abnormal import fix_abnormal_value
from .missing import fix_missing_value
from .normalization import normalization
from .scale import fix_scaling
from .standard import fix_standard, logarithmetics
| 33.833333
| 50
| 0.866995
| 27
| 203
| 6.296296
| 0.407407
| 0.211765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 203
| 5
| 51
| 40.6
| 0.934066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96d84256259e44d892215cc82e4e515b4941110a
| 26,300
|
py
|
Python
|
saas/add_env.py
|
kerven88/opsany-paas
|
78b83d0b6a46f3e70226ca99992d736b2af0af72
|
[
"Apache-2.0"
] | null | null | null |
saas/add_env.py
|
kerven88/opsany-paas
|
78b83d0b6a46f3e70226ca99992d736b2af0af72
|
[
"Apache-2.0"
] | null | null | null |
saas/add_env.py
|
kerven88/opsany-paas
|
78b83d0b6a46f3e70226ca99992d736b2af0af72
|
[
"Apache-2.0"
] | null | null | null |
"""
mysql-connector==2.2.9
SQLAlchemy==1.4.22
"""
import os
import sys
import datetime
import configparser
from sqlalchemy import Column, DateTime, ForeignKey, String, create_engine, Index
from sqlalchemy.dialects.mysql import INTEGER, LONGTEXT, SMALLINT, TINYINT
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from urllib import parse

# change working directory to the install directory
os.chdir('../install')

# verify the config file exists
if not os.path.exists('install.config'):
    sys.exit('install.config does not exist.')

read_install_config = configparser.ConfigParser()
try:
    read_install_config.read('install.config')
    config_dict = dict(read_install_config)
except Exception as e:
    print(e)
    sys.exit('file content is malformed.')


def replace_str(data):
    if not data:
        return None
    return data.replace("\"", "").replace("\'", "")


MYSQL_SERVER_IP = replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP", "127.0.0.1"))
MYSQL_ROOT_PASSWORD = replace_str(config_dict.get("mysql").get("MYSQL_ROOT_PASSWORD", "OpsAny@2020"))

try:
    db = create_engine("mysql+mysqlconnector://root:{}@{}/opsany_paas".format(parse.quote_plus(MYSQL_ROOT_PASSWORD), MYSQL_SERVER_IP))
    Base = declarative_base(db)

    def to_dict(self):
        return {c.name: getattr(self, c.name, None)
                for c in self.__table__.columns}

    Base.to_dict = to_dict
except Exception as e:
    print("Script error: {}".format(str(e)))
    sys.exit('Failed to connect to MySQL. Please check the MySQL server!')
envs = [
    {
        "app_code": "cmdb",
        "env": [
            # CMDB count 8
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_CMDB_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_CMDB_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    }, {
        "app_code": "cmp",
        "env": [
            # CMP count 7
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_CMP_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_CMP_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    }, {
        "app_code": "job",
        "env": [
            # JOB count 10
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_JOB_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "FILE_ROOT", "value": replace_str(config_dict.get('opsany_saas').get("FILE_ROOT")), "env_scope": "all", "intro": "Salt file root"},
            {"key": "PILLAR_ROOT", "value": replace_str(config_dict.get('opsany_saas').get("PILLAR_ROOT")), "env_scope": "all", "intro": "Salt pillar root"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_JOB_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            {"key": "REDIS_HOST", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_IP")), "env_scope": "all", "intro": "redis host"},
            {"key": "REDIS_PORT", "value": replace_str(config_dict.get("redis").get("REDIS_PORT")), "env_scope": "all", "intro": "redis port"},
            {"key": "REDIS_PASSWORD", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_PASSWORD")), "env_scope": "all", "intro": "redis password"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    }, {
        "app_code": "workbench",
        "env": [
            # WORKBENCH count 7
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_WORKBENCH_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_WORKBENCH_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
        ]
    }, {
        "app_code": "rbac",
        "env": [
            # RBAC count 4
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_RBAC_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
        ]
    }, {
        "app_code": "monitor",
        "env": [
            # MONITOR count 10
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_MONITOR_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_MONITOR_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            {"key": "ELASTIC_SEARCH_USERNAME", "value": replace_str(config_dict.get('elasticsearch').get("ELASTIC_SEARCH_USERNAME")), "env_scope": "all", "intro": "es username"},
            {"key": "ES_PASSWORD", "value": replace_str(config_dict.get('elasticsearch').get("ES_PASSWORD")), "env_scope": "all", "intro": "es password"},
            {"key": "ES_SERVER_IP", "value": replace_str(config_dict.get('elasticsearch').get("ES_SERVER_IP")), "env_scope": "all", "intro": "es host"},
            {"key": "ELASTIC_PORT", "value": replace_str(config_dict.get('elasticsearch').get("ELASTIC_PORT")), "env_scope": "all", "intro": "es port"},
            {"key": "ELASTIC_SEARCH_INDEX", "value": replace_str(config_dict.get('elasticsearch').get("ELASTIC_SEARCH_INDEX")), "env_scope": "all", "intro": "es index"},
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
        ]
    }, {
        "app_code": "control",
        "env": [
            # CONTROL count 13
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_CONTROL_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_CONTROL_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            {"key": "REDIS_HOST", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_IP")), "env_scope": "all", "intro": "redis host"},
            {"key": "REDIS_PORT", "value": replace_str(config_dict.get("redis").get("REDIS_PORT")), "env_scope": "all", "intro": "redis port"},
            {"key": "REDIS_PASSWORD", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_PASSWORD")), "env_scope": "all", "intro": "redis password"},
            {"key": "ROSTER_FILE_URL", "value": replace_str(config_dict.get('opsany_saas').get("ROSTER_FILE_URL")), "env_scope": "all", "intro": "roster file path"},
            {"key": "SALT_SSH_FILE_URL", "value": replace_str(config_dict.get('opsany_saas').get("SALT_SSH_FILE_URL")), "env_scope": "all", "intro": "salt ssh file path"},
            {"key": "ANSIBLE_HOST_KEY_CHECKING", "value": replace_str(config_dict.get("opsany_saas").get("ANSIBLE_HOST_KEY_CHECKING")), "env_scope": "all", "intro": "ansible vs host checking"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    }, {
        "app_code": "devops",
        "env": [
            # devops count 8
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    }, {
        "app_code": "bastion",
        "env": [
            # bastion count 8
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_BASTION_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "REDIS_HOST", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_IP")), "env_scope": "all", "intro": "redis host"},
            {"key": "REDIS_PORT", "value": replace_str(config_dict.get("redis").get("REDIS_PORT")), "env_scope": "all", "intro": "redis port"},
            {"key": "REDIS_PASSWORD", "value": replace_str(config_dict.get("redis").get("REDIS_SERVER_PASSWORD")), "env_scope": "all", "intro": "redis password"},
            {"key": "TERMINAL_TIMEOUT", "value": replace_str(config_dict.get("redis").get("TERMINAL_TIMEOUT")), "env_scope": "all", "intro": "terminal timeout"},
        ]
    },
    {
        "app_code": "deploy",
        "env": [
            # deploy count 8 (reuses the devops credentials)
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    },
    {
        "app_code": "pipeline",
        "env": [
            # pipeline count 8
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    },
    {
        "app_code": "repo",
        "env": [
            # repo count 8
            {"key": "UPLOAD_PATH", "value": replace_str(config_dict.get('opsany_saas').get("UPLOAD_PATH")), "env_scope": "all", "intro": "uploads path"},
            {"key": "MYSQL_PASSWORD", "value": replace_str(config_dict.get('mysql').get("MYSQL_OPSANY_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mysql password"},
            {"key": "MYSQL_HOST", "value": replace_str(config_dict.get('mysql').get("MYSQL_SERVER_IP")), "env_scope": "all", "intro": "mysql host"},
            {"key": "MYSQL_PORT", "value": replace_str(config_dict.get('mysql').get("MYSQL_PORT")), "env_scope": "all", "intro": "mysql port"},
            {"key": "MONGO_HOST", "value": replace_str(config_dict.get('mongodb').get("MONGO_SERVER_IP")), "env_scope": "all", "intro": "mongo host"},
            {"key": "MONGO_PORT", "value": replace_str(config_dict.get('mongodb').get("MONGO_PORT")), "env_scope": "all", "intro": "mongo port"},
            {"key": "MONGO_PASSWORD", "value": replace_str(config_dict.get('mongodb').get("MONGO_DEVOPS_PASSWORD")), "env_scope": "all", "intro": "mongo password"},
            # {"key": "DEFAULT_USER_ICON", "value": read_install_config.get("DEFAULT_USER_ICON"), "env_scope": "all", "intro": "user default icon"},
        ]
    }
]
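# SQLAlchemy models mirroring the existing PaaS tables this script writes to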
class PaasApptag(Base):
    __tablename__ = 'paas_apptags'

    id = Column(INTEGER(11), primary_key=True)
    name = Column(String(20), nullable=False, unique=True)
    code = Column(String(30), nullable=False, unique=True)
    index = Column(INTEGER(11), nullable=False)


class PaasApp(Base):
    __tablename__ = 'paas_app'

    id = Column(INTEGER(11), primary_key=True)
    name = Column(String(20), nullable=False, unique=True)
    code = Column(String(30), nullable=False, unique=True)
    introduction = Column(LONGTEXT, nullable=False)
    creater = Column(String(20), nullable=False)
    created_date = Column(DateTime, index=True)
    state = Column(SMALLINT(6), nullable=False)
    is_already_test = Column(TINYINT(1), nullable=False)
    is_already_online = Column(TINYINT(1), nullable=False)
    first_test_time = Column(DateTime, index=True)
    first_online_time = Column(DateTime, index=True)
    language = Column(String(50))
    auth_token = Column(String(36))
    tags_id = Column(ForeignKey('paas_apptags.id'), index=True)
    deploy_token = Column(LONGTEXT)
    is_use_celery = Column(TINYINT(1), nullable=False)
    is_use_celery_beat = Column(TINYINT(1), nullable=False)
    is_saas = Column(TINYINT(1), nullable=False)
    logo = Column(String(100))

    tags = relationship('PaasApptag')


class EngineApp(Base):
    __tablename__ = 'engine_apps'

    id = Column(INTEGER(11), primary_key=True)
    name = Column(String(20), nullable=False)
    logo = Column(String(100), nullable=False)
    app_code = Column(String(100), nullable=False, unique=True)
    app_lang = Column(String(100), nullable=False)
    app_type = Column(String(100), nullable=False)
    is_active = Column(TINYINT(1), nullable=False)
    created_at = Column(DateTime, nullable=False)
    updated_at = Column(DateTime, nullable=False)


class EngineAppEnv(Base):
    __tablename__ = 'engine_app_envs'

    id = Column(INTEGER(11), primary_key=True)
    mode = Column(String(200), nullable=False)
    key = Column(String(200), nullable=False)
    value = Column(String(200), nullable=False)
    created_at = Column(DateTime, nullable=False)
    updated_at = Column(DateTime, nullable=False)
    bk_app_id = Column(ForeignKey('engine_apps.id'), nullable=False, index=True)

    bk_app = relationship('EngineApp')


class PaasAppEnvvar(Base):
    __tablename__ = 'paas_app_envvars'
    __table_args__ = (
        Index('paas_app_envvars_app_code_36685348c7256adf_uniq', 'app_code', 'mode', 'name', unique=True),
    )

    id = Column(INTEGER(11), primary_key=True)
    app_code = Column(String(30), nullable=False)
    mode = Column(String(20), nullable=False)
    name = Column(String(50), nullable=False)
    value = Column(String(1024), nullable=False)
    intro = Column(LONGTEXT)
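# upserts the env definitions above into the database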
class AddEnv:
    def __init__(self):
        cursor = sessionmaker(bind=db)
        self.session = cursor()
        self.envs = envs

    def add_env(self):
        for env in self.envs:
            app = self.session.query(PaasApp).filter(PaasApp.code == env.get("app_code")).first()
            if app:
                env_list = env.get("env")
                for env_dict in env_list:
                    key = env_dict.get("key")
                    value = env_dict.get("value")
                    env_scope = "prod"
                    env_query = self.session.query(EngineAppEnv).filter(
                        EngineAppEnv.bk_app_id == app.id,
                        EngineAppEnv.key == key
                    ).first()
                    if not env_query:
                        create_query = EngineAppEnv(mode=env_scope, key=key, value=value,
                                                    created_at=datetime.datetime.now(),
                                                    updated_at=datetime.datetime.now(),
                                                    bk_app_id=app.id
                                                    )
                        self.session.add(create_query)
                        self.session.commit()
                        print("For {} create env info: key={} value={}".format(env.get("app_code"), key, value))
                    else:
                        self.session.query(EngineAppEnv).filter(
                            EngineAppEnv.id == env_query.id).update({
                                "mode": env_scope,
                                "key": key,
                                "value": value,
                                "updated_at": datetime.datetime.now(),
                                "bk_app_id": app.id
                            })
                        self.session.commit()
                        print("For {} update env info: key={} value={}".format(env.get("app_code"), key, value))

    def add_env_v2(self):
        for env in self.envs:
            app_code = env.get("app_code")
            env_list = env.get("env")
            for env_dict in env_list:
                env_query = self.session.query(PaasAppEnvvar).filter(
                    PaasAppEnvvar.app_code == app_code,
                    PaasAppEnvvar.name == env_dict.get("key")
                ).first()
                if not env_query:
                    create_query = PaasAppEnvvar(app_code=app_code,
                                                 name=env_dict.get("key", ""),
                                                 value=env_dict.get("value", ""),
                                                 mode=env_dict.get("env_scope", "all"),
                                                 intro=env_dict.get("intro", ""),
                                                 )
                    self.session.add(create_query)
                    self.session.commit()
                    print("For {} create env info: key={} value={}".format(app_code, env_dict.get("key"),
                                                                           env_dict.get("value")))
                else:
                    self.session.query(PaasAppEnvvar).filter(
                        PaasAppEnvvar.id == env_query.id).update({
                            "mode": env_dict.get("env_scope", "all"),
                            "name": env_dict.get("key", ""),
                            "value": env_dict.get("value", ""),
                            "intro": env_dict.get("intro", ""),
                            "app_code": app_code,
                        })
                    self.session.commit()
                    print("For {} update env info: key={} value={}".format(app_code, env_dict.get("key"),
                                                                           env_dict.get("value")))
if __name__ == '__main__':
    AddEnv().add_env_v2()
    print("ENV INPUT IS DONE, SUCCESS.")
| 69.028871
| 193
| 0.581825
| 3,109
| 26,300
| 4.653265
| 0.070119
| 0.05516
| 0.081358
| 0.117232
| 0.816686
| 0.773415
| 0.732771
| 0.712103
| 0.699938
| 0.684938
| 0
| 0.005687
| 0.231179
| 26,300
| 380
| 194
| 69.210526
| 0.709792
| 0.051559
| 0
| 0.410658
| 0
| 0.053292
| 0.317795
| 0.029511
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015674
| false
| 0.087774
| 0.028213
| 0.003135
| 0.238245
| 0.021944
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
96e207eb8a118e7b80e898bcd7a442641ebdb0f6
| 7,810
|
py
|
Python
|
blog/tests/test_views.py
|
robml/django_diy_blog
|
b03d92ccc1208408d0fb907271390741bc9bb101
|
[
"BSD-3-Clause"
] | null | null | null |
blog/tests/test_views.py
|
robml/django_diy_blog
|
b03d92ccc1208408d0fb907271390741bc9bb101
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T00:25:40.000Z
|
2021-09-22T18:39:14.000Z
|
blog/tests/test_views.py
|
robml/django_diy_blog
|
b03d92ccc1208408d0fb907271390741bc9bb101
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse, reverse_lazy
from django.contrib.auth.models import User

from blog.models import Blog, BlogAuthor


class IndexTest(TestCase):
    def test_view_url_exists_at_desired_location(self):
        response = self.client.get('/blog/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'index.html')

    def test_view_uses_correct_dr_evil_values(self):
        response = self.client.get(reverse('index'))
        self.assertTrue('num_blogs_dr_evil' in response.context)
        real_count = Blog.objects.filter(author__name__username__iexact='dr_evil').count()
        self.assertEqual(response.context['num_blogs_dr_evil'], real_count)


class BlogAuthorListViewTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Create a batch of users and authors
        num_users = 13
        for test_user_id in range(num_users):
            test_user_id = str(test_user_id)
            test_user = User.objects.create_user(username='testuser' + test_user_id, password='testuser' + test_user_id)
            test_user.save()
            BlogAuthor.objects.create(name=test_user, bio='user#' + test_user_id)

    def test_view_url_exists_at_desired_location(self):
        response = self.client.get('/blog/bloggers/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        response = self.client.get(reverse('blogauthor-list'))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('blogauthor-list'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'blog/blogauthor_list.html')

    def test_pagination_is_five(self):
        response = self.client.get(reverse('blogauthor-list'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('is_paginated' in response.context)
        self.assertTrue(response.context['is_paginated'] == True)
        self.assertTrue(len(response.context['blogauthor_list']) == 5)


class BlogListViewTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Create a batch of users and authors, each with one blog post
        num_users = 13
        for test_user_id in range(num_users):
            test_user_id = str(test_user_id)
            test_user = User.objects.create_user(username='testuser' + test_user_id, password='testuser' + test_user_id)
            test_user.save()
            author = BlogAuthor.objects.create(name=test_user, bio='user#' + test_user_id)
            Blog.objects.create(author=author, title="post by user#" + test_user_id, description="test desc")

    def test_view_url_exists_at_desired_location(self):
        response = self.client.get('/blog/all/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        response = self.client.get(reverse('blog-list'))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('blog-list'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'blog/blog_list.html')

    def test_pagination_is_five(self):
        response = self.client.get(reverse('blog-list'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('is_paginated' in response.context)
        self.assertTrue(response.context['is_paginated'] == True)
        self.assertTrue(len(response.context['blog_list']) == 5)


class BlogAuthorDetailViewTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        test_user = User.objects.create_user(username='testuser1', password='testuser1')
        test_user.save()
        BlogAuthor.objects.create(name=test_user, bio='user1')

    def test_view_url_exists_at_desired_location(self):
        id = BlogAuthor.objects.get(id=1).id
        response = self.client.get('/blog/bloggers/' + str(id))
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        id = BlogAuthor.objects.get(id=1).id
        response = self.client.get(reverse('blogauthor-detail', args=[str(id)]))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        id = BlogAuthor.objects.get(id=1).id
        response = self.client.get(reverse('blogauthor-detail', args=[str(id)]))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'blog/blogauthor_detail.html')


class BlogDetailViewTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        test_user = User.objects.create_user(username='testuser1', password='testuser1')
        test_user.save()
        author = BlogAuthor.objects.create(name=test_user, bio='user1')
        Blog.objects.create(author=author, title="post by user1", description="test desc")

    def test_view_url_exists_at_desired_location(self):
        id = Blog.objects.get(id=1).id
        response = self.client.get('/blog/' + str(id))
        self.assertEqual(response.status_code, 301)  # Getting a 301 instead of a 200

    def test_view_url_accessible_by_name(self):
        id = Blog.objects.get(id=1).id
        response = self.client.get(reverse('blog-detail', args=[str(id)]))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        id = Blog.objects.get(id=1).id
        response = self.client.get(reverse('blog-detail', args=[str(id)]))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'blog/blog_detail.html')
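# (the 301 asserted above is most likely Django's APPEND_SLASH redirect
# from '/blog/<id>' to '/blog/<id>/'; requesting the slash-terminated
# URL, or passing follow=True to client.get, should yield the 200 that
# the other tests assert)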
# Notes: the behaviour below is already covered by the form and model tests.
# HOWEVER: self.client.get yields a 404 with the current syntax, and a NoReverseMatch when used with reverse.
# I leave this as an exercise if testing the view for new comments is crucial to you, best of luck, Rob
"""
class NewCommentViewTest(TestCase):
    def setUp(self):
        # Create two users and one blog post
        test_user1 = User.objects.create_user(username='testuser1', password='1X<ISRUkw+tuK')
        test_user2 = User.objects.create_user(username='testuser2', password='2HJ1vRV0Z&3iD')
        test_user1.save()
        test_user2.save()
        author = BlogAuthor.objects.create(name=test_user1, bio='user1')
        Blog.objects.create(author=author, title="post by user1", description="test desc")

    def test_redirect_if_not_logged_in(self):
        id = Blog.objects.get(id=1).id
        response = self.client.get('blog/' + str(id) + '/create')
        # Manually check the redirect (can't use assertRedirect, because the redirect URL is unpredictable)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.url.startswith('/accounts/login/'))

    def test_logged_in(self):
        id = Blog.objects.get(id=1).id
        login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD')
        response = self.client.get('blog/' + str(id) + '/create')
        # Check that it lets us log in
        self.assertEqual(response.status_code, 200)

    def test_uses_correct_template(self):
        id = Blog.objects.get(id=1).id
        login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD')
        response = self.client.get('blog/' + str(id) + '/create')
        self.assertEqual(response.status_code, 200)
        # Check that the correct template was used
        self.assertTemplateUsed(response, 'blog/comment_form.html')
"""
| 44.124294
| 115
| 0.696031
| 1,011
| 7,810
| 5.181998
| 0.160237
| 0.038175
| 0.054591
| 0.084176
| 0.790227
| 0.768658
| 0.758733
| 0.735827
| 0.710059
| 0.704333
| 0
| 0.018096
| 0.1863
| 7,810
| 177
| 116
| 44.124294
| 0.806294
| 0.043406
| 0
| 0.709091
| 0
| 0
| 0.091927
| 0.012381
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.2
| false
| 0.036364
| 0.036364
| 0
| 0.281818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|