Schema (113 columns, name: type):

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64

Quality-signal columns, `qsc_*_quality_signal` (41 columns, float64 unless noted): qsc_code_num_words (int64), qsc_code_num_chars, qsc_code_mean_word_length, qsc_code_frac_words_unique, qsc_code_frac_chars_top_2grams, qsc_code_frac_chars_top_3grams, qsc_code_frac_chars_top_4grams, qsc_code_frac_chars_dupe_5grams through qsc_code_frac_chars_dupe_10grams, qsc_code_frac_chars_replacement_symbols, qsc_code_frac_chars_digital, qsc_code_frac_chars_whitespace, qsc_code_size_file_byte, qsc_code_num_lines, qsc_code_num_chars_line_max, qsc_code_num_chars_line_mean, qsc_code_frac_chars_alphabet, qsc_code_frac_chars_comments, qsc_code_cate_xml_start, qsc_code_frac_lines_dupe_lines, qsc_code_cate_autogen, qsc_code_frac_lines_long_string, qsc_code_frac_chars_string_length, qsc_code_frac_chars_long_word_length, qsc_code_frac_lines_string_concat, qsc_code_cate_encoded_data, qsc_code_frac_chars_hex_words, qsc_code_frac_lines_prompt_comments, qsc_code_frac_lines_assert, qsc_codepython_cate_ast, qsc_codepython_frac_lines_func_ratio, qsc_codepython_cate_var_zero (bool), qsc_codepython_frac_lines_pass, qsc_codepython_frac_lines_import, qsc_codepython_frac_lines_simplefunc, qsc_codepython_score_lines_no_logic, qsc_codepython_frac_lines_print.

Filter-flag columns: the same 41 names without the `_quality_signal` suffix (int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat: null).

Trailing columns: effective: string | hits: int64
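For orientation, a minimal sketch of how a dump with this schema could be loaded and filtered. Two assumptions, not documented facts: the rows sit in a local Parquet file (the name below is hypothetical), and `hits` counts how many of the per-signal filter flags fired, which matches every row shown below.

```python
# Minimal sketch, assuming a local Parquet export of this split.
import pandas as pd

df = pd.read_parquet("code_quality_sample.parquet")  # hypothetical file name

# The *_quality_signal columns hold raw measurements; the matching
# unsuffixed qsc_* columns hold 0/1 filter flags, and "hits" sums them.
signals = [c for c in df.columns if c.endswith("_quality_signal")]
clean = df[df["hits"] == 0]
print(f"{len(df)} rows, {len(signals)} signals, {len(clean)} with no filter hits")
```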
Row 1
hexsha: cc35e47ddd8b88da87844c5c131307aad3888dab | size: 35,799 | ext: py | lang: Python
max_stars: pyEX/economic/economic.py in majacQ/pyEX @ def9280fbaa17a2afe4434b6f584f1a602f4bc55, licenses ["Apache-2.0"], count null, events null → null
max_issues: same path/repo/sha/licenses, count null, events null → null
max_forks: same path/repo/sha/licenses, count null, events null → null
content:
```python
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from enum import Enum
from functools import lru_cache
from ..common import _expire, _UTC, _timeseriesWrapper
from ..points import points
from ..timeseries import timeSeries, timeSeriesDF
class EconomicPoints(Enum):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
Attributes:
US30; US 30-Year fixed rate mortgage average
US15; US 15-Year fixed rate mortgage average
US5; US 5/1-Year adjustable rate mortgage average
FEDFUNDS; Effective federal funds rate
CREDITCARD; Commercial bank credit card interest rate as a percent, not seasonally adjusted
CDNJ; CD Rate Non-Jumbo less than $100,000 Money market
CDJ; CD Rate Jumbo more than $100,000 Money market
GDP; Real Gross Domestic Product
INDPRO; Industrial Production Index
CPI; Consumer Price Index All Urban Consumers
PAYROLL; Total nonfarm employees in thousands of persons seasonally adjusted
HOUSING; Total Housing Starts in thousands of units, seasonally adjusted annual rate
UNEMPLOYMENT; Unemployment rate returned as a percent, seasonally adjusted
VEHICLES; Total Vehicle Sales in millions of units
RECESSION_PROB; US Recession Probabilities. Smoothed recession probabilities for the United States, obtained from a dynamic-factor Markov-switching model applied to four monthly coincident variables: non-farm payroll employment, the index of industrial production, real personal income excluding transfer payments, and real manufacturing and trade sales.
INITIALCLAIMS; Initial claims returned as a number, seasonally adjusted
RETAILMONEY; Retail money funds returned as billions of dollars, seasonally adjusted
INSTITUTIONALMONEY; Institutional money funds returned as billions of dollars, seasonally adjusted
"""
US30 = "MORTGAGE30US"
US15 = "MORTGAGE15US"
US5 = "MORTGAGE5US"
FEDFUNDS = "FEDFUNDS"
CREDITCARD = "TERMCBCCALLNS"
CDNJ = "MMNRNJ"
CDJ = "MMNRJD"
GDP = "A191RL1Q225SBEA"
INDPRO = "INDPRO"
CPI = "CPIAUCSL"
PAYROLL = "PAYEMS"
HOUSING = "HOUST"
UNEMPLOYMENT = "UNRATE"
VEHICLES = "TOTALSA"
RECESSION_PROB = "RECPROUSM156N"
INITIALCLAIMS = "IC4WSA"
RETAILMONEY = "WRMFSL"
INSTITUTIONALMONEY = "WIMFSL"
@staticmethod
@lru_cache(1)
def options():
"""Return a list of the available economic points options"""
return list(map(lambda c: c.value, EconomicPoints))
@_expire(hour=8, tz=_UTC)
def us30(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
US30; US 30-Year fixed rate mortgage average
"""
return points("MORTGAGE30US", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def us30History(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="MORTGAGE30US",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def us30HistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="MORTGAGE30US",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def us15(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
US15; US 15-Year fixed rate mortgage average
"""
return points("MORTGAGE15US", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def us15History(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="MORTGAGE15US",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def us15HistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="MORTGAGE15US",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def us5(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
US5; US 5/1-Year adjustable rate mortgage average
"""
return points("MORTGAGE5US", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def us5History(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="MORTGAGE5US",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def us5HistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="MORTGAGE5US",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def fedfunds(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
FEDFUNDS; Effective federal funds rate
"""
return points("FEDFUNDS", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def fedfundsHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="FEDFUNDS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def fedfundsHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="FEDFUNDS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def creditcard(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
CREDITCARD; Commercial bank credit card interest rate as a percent, not seasonally adjusted
"""
return points("TERMCBCCALLNS", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def creditcardHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="RATES",
key="TERMCBCCALLNS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def creditcardHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="RATES",
key="TERMCBCCALLNS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def cdnj(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
CDNJ; CD Rate Non-Jumbo less than $100,000 Money market
"""
return points("MMNRNJ", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def cdnjHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="RATES",
key="MMNRNJ",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def cdnjHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="RATES",
key="MMNRNJ",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def cdj(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
CDJ; CD Rate Jumbo more than $100,000 Money market
"""
return points("MMNRJD", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def cdjHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="RATES",
key="MMNRJD",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def cdjHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="RATES",
key="MMNRJD",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def gdp(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
GDP; Real Gross Domestic Product
"""
return points("A191RL1Q225SBEA", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def gdpHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="A191RL1Q225SBEA",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def gdpHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="A191RL1Q225SBEA",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def indpro(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
INDPRO; Industrial Production Index
"""
return points("INDPRO", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def indproHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="INDPRO",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def indproHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="INDPRO",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def cpi(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
CPI; Consumer Price Index All Urban Consumers
"""
return points("CPIAUCSL", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def cpiHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="CPIAUCSL",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def cpiHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="CPIAUCSL",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def payroll(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
PAYROLL; Total nonfarm employees in thousands of persons seasonally adjusted
"""
return points("PAYEMS", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def payrollHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="PAYEMS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def payrollHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="PAYEMS",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def housing(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
HOUSING; Total Housing Starts in thousands of units, seasonally adjusted annual rate
"""
return points("HOUST", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def housingHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="HOUST",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def housingHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="HOUST",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def unemployment(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
UNEMPLOYMENT; Unemployment rate returned as a percent, seasonally adjusted
"""
return points("UNRATE", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def unemploymentHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="UNRATE",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def unemploymentHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="UNRATE",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def vehicles(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
VEHICLES; Total Vehicle Sales in millions of units
"""
return points("TOTALSA", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def vehiclesHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="TOTALSA",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def vehiclesHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="TOTALSA",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def recessionProb(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
RECESSION_PROB; US Recession Probabilities. Smoothed recession probabilities for the United States, obtained from a dynamic-factor Markov-switching model applied to four monthly coincident variables: non-farm payroll employment, the index of industrial production, real personal income excluding transfer payments, and real manufacturing and trade sales.
"""
return points("RECPROUSM156N", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def recessionProbHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="RECPROUSM156N",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def recessionProbHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="RECPROUSM156N",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def initialClaims(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
INITIALCLAIMS; Initial claims returned as a number, seasonally adjusted
"""
return points("IC4WSA", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def initialClaimsHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="IC4WSA",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def initialClaimsHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="IC4WSA",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def institutionalMoney(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
INSTITUTIONALMONEY; Institutional money funds returned as billions of dollars, seasonally adjusted
"""
return points("WRMFSL", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def institutionalMoneyHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="WRMFSL",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def institutionalMoneyHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="WRMFSL",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def retailMoney(token="", version="stable"):
"""Economic data points
https://iexcloud.io/docs/api/#economic-data
RETAILMONEY; Retail money funds returned as billions of dollars, seasonally adjusted
"""
return points("WIMFSL", token=token, version=version)
@_expire(hour=8, tz=_UTC)
def retailMoneyHistory(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="WIMFSL",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
@_expire(hour=8, tz=_UTC)
def retailMoneyHistoryDF(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeriesDF(
id="ECONOMIC",
key="WIMFSL",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
)
```
avg_line_length: 25.941304 | max_line_length: 360 | alphanum_fraction: 0.634906
quality signals (41 values, schema order): 3,928 | 35,799 | 5.720723 | 0.063136 | 0.076899 | 0.060745 | 0.076944 | 0.927685 | 0.915046 | 0.914201 | 0.908682 | 0.898714 | 0.884117 | 0 | 0.006828 | 0.243191 | 35,799 | 1,379 | 361 | 25.960116 | 0.822574 | 0.474203 | 0 | 0.767918 | 0 | 0 | 0.080401 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093857 | false | 0 | 0.008532 | 0 | 0.228669 | 0
filter flags (41 values, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 7
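An editorial usage note for the pyEX module in the row above: every indicator exposes the same point/History/HistoryDF triple, so one sketch covers them all. The token is a placeholder; a real IEX Cloud token is required, and history kwargs pass through to `pyEX.timeseries.timeSeries`.

```python
# Hypothetical usage of the module shown above; "YOUR_TOKEN" is a placeholder.
from pyEX.economic.economic import EconomicPoints, us30, us30HistoryDF

print(EconomicPoints.options())         # the available series keys
latest = us30(token="YOUR_TOKEN")       # latest US 30-year mortgage average
df = us30HistoryDF(token="YOUR_TOKEN")  # full history as a DataFrame
```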
Row 2
hexsha: cc520c2c4f845c6e00f80fd02390564baef88ab8 | size: 592 | ext: py | lang: Python
max_stars: TwitchChatCode/twitchlogo.py in AdamSmif/Sportsball-Adventures @ 10de2ebf4ff2eb8e1ee77e52149b713841882a04, licenses ["MIT"], count 2, events 2020-11-28T01:42:10.000Z → 2022-01-21T07:49:27.000Z
max_issues: same path/repo/sha/licenses, count 2, events 2020-11-28T01:33:55.000Z → 2020-11-28T01:36:30.000Z
max_forks: same path/repo/sha/licenses, count null, events null → null
content:
```python
def print_twitch_logo():
print(' _______ _ _ _ _____ _ _ _ ')
print('|__ __| (_) | | | / ____| | | | | | ')
print(' | |_ ___| |_ ___| |__ | | ___ _ __ | |_ _ __ ___ | | | ___ _ __ ')
print(" | \ \ /\ / / | __/ __| '_ \ | | / _ \| '_ \| __| '__/ _ \| | |/ _ \ '__|")
print(" | |\ V V /| | || (__| | | | | |___| (_) | | | | |_| | | (_) | | | __/ | ")
print(" |_| \_/\_/ |_|\__\___|_| |_| \_____\___/|_| |_|\__|_| \___/|_|_|\___|_| ")
```
avg_line_length: 65.777778 | max_line_length: 92 | alphanum_fraction: 0.280405
quality signals (41 values, schema order): 12 | 592 | 4.166667 | 0.416667 | 0.8 | 0.9 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.466216 | 592 | 9 | 93 | 65.777778 | 0.158228 | 0 | 0 | 0 | 0 | 0.571429 | 0.799325 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | true | 0 | 0 | 0 | 0.142857 | 1
filter flags (41 values, schema order): 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1
effective: 0 | hits: 10
Row 3
hexsha: cc638c85cc8035da764c4fa28c2a98ed49f5b5db | size: 75 | ext: py | lang: Python
max_stars: dacy/utils.py in HLasse/DaCy @ 67ca82f3c1637193b140ecea7a683cd5d4c6749e, licenses ["Apache-2.0"], count 1, events 2021-07-24T19:14:34.000Z → 2021-07-24T19:14:34.000Z
max_issues: dacy/utils.py in MalteHB/DaCy @ 1c3d348b14368c772d13344d35dc076b01d5bf07, licenses ["Apache-2.0"], count null, events null → null
max_forks: same as max_issues, count null, events null → null
content:
```python
import numpy as np
def softmax(x):
return np.exp(x) / sum(np.exp(x))
```
avg_line_length: 12.5 | max_line_length: 37 | alphanum_fraction: 0.626667
quality signals (41 values, schema order): 15 | 75 | 3.133333 | 0.666667 | 0.212766 | 0.255319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213333 | 75 | 5 | 38 | 15 | 0.79661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0
filter flags (41 values, schema order): 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0
effective: 0 | hits: 8
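An editorial note on the row above: `softmax` as written overflows for large inputs, since np.exp(1000) is inf. A numerically stable variant using the standard max-subtraction trick (my sketch, not part of the DaCy file):

```python
import numpy as np

def softmax_stable(x):
    # Subtracting the max leaves the result unchanged (the exp(-max)
    # factor cancels in the ratio) but keeps np.exp from overflowing.
    z = np.exp(x - np.max(x))
    return z / z.sum()
```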
Row 4
hexsha: cc766b470e7c9eef6ede7334e5b72e57feb42a21 | size: 121 | ext: py | lang: Python
max_stars: geco/mips/knapsack/__init__.py in FreestyleBuild/GeCO @ 6db1a549b3145b3bc5d3025a9bccc03be6575564, licenses ["MIT"], count 8, events 2020-12-16T09:59:05.000Z → 2022-03-18T09:48:43.000Z
max_issues: same path/repo/sha/licenses, count 101, events 2020-11-09T10:20:03.000Z → 2022-03-24T13:50:06.000Z
max_forks: same path/repo/sha/licenses, count 3, events 2021-04-06T13:26:03.000Z → 2022-03-22T13:22:16.000Z
content:
```python
from geco.mips.knapsack.generic import *
from geco.mips.knapsack.yang import *
from geco.mips.knapsack.pisinger import *
```
avg_line_length: 30.25 | max_line_length: 41 | alphanum_fraction: 0.801653
quality signals (41 values, schema order): 18 | 121 | 5.388889 | 0.444444 | 0.247423 | 0.371134 | 0.618557 | 0.536082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.099174 | 121 | 3 | 42 | 40.333333 | 0.889908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
filter flags (41 values, schema order): 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 8
Row 5
hexsha: aee46ac4bf1a513c7231f0d9b518309429d5345e | size: 2,031 | ext: py | lang: Python
max_stars: appengine/networkx/algorithms/tests/test_graphical.py in CSE512-15S/a3-haynesb-Pending @ 881c3872304f2cd796bd4db7211ab8c3f108586b, licenses ["Apache-2.0"], count 12, events 2015-03-25T20:20:26.000Z → 2021-11-14T19:44:56.000Z
max_issues: same path/repo/sha/licenses, count 71, events 2015-01-05T16:50:55.000Z → 2020-09-30T19:17:47.000Z
max_forks: same path/repo/sha/licenses, count 14, events 2015-02-15T22:19:18.000Z → 2020-09-30T18:54:54.000Z
content:
```python
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
def test_valid_degree_sequence1():
n = 100
p = .3
for i in range(10):
G = nx.erdos_renyi_graph(n,p)
deg = list(G.degree().values())
assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
def test_valid_degree_sequence2():
n = 100
for i in range(10):
G = nx.barabasi_albert_graph(n,1)
deg = list(G.degree().values())
assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
def test_atlas():
for graph in nx.graph_atlas_g():
deg = list(graph.degree().values())
assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
def test_small_graph_true():
z=[5,3,3,3,3,2,2,2,1,1,1]
assert_true(nx.is_valid_degree_sequence(z, method='hh'))
assert_true(nx.is_valid_degree_sequence(z, method='eg'))
z=[10,3,3,3,3,2,2,2,2,2,2]
assert_true(nx.is_valid_degree_sequence(z, method='hh'))
assert_true(nx.is_valid_degree_sequence(z, method='eg'))
z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_true(nx.is_valid_degree_sequence(z, method='hh'))
assert_true(nx.is_valid_degree_sequence(z, method='eg'))
def test_small_graph_false():
z=[1000,3,3,3,3,2,2,2,1,1,1]
assert_false(nx.is_valid_degree_sequence(z, method='hh'))
assert_false(nx.is_valid_degree_sequence(z, method='eg'))
z=[6,5,4,4,2,1,1,1]
assert_false(nx.is_valid_degree_sequence(z, method='hh'))
assert_false(nx.is_valid_degree_sequence(z, method='eg'))
z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_false(nx.is_valid_degree_sequence(z, method='hh'))
assert_false(nx.is_valid_degree_sequence(z, method='eg'))
```
avg_line_length: 38.320755 | max_line_length: 76 | alphanum_fraction: 0.622846
quality signals (41 values, schema order): 335 | 2,031 | 3.504478 | 0.155224 | 0.187394 | 0.13799 | 0.229983 | 0.798978 | 0.796422 | 0.796422 | 0.763203 | 0.763203 | 0.763203 | 0 | 0.050826 | 0.225012 | 2,031 | 52 | 77 | 39.057692 | 0.695044 | 0.009847 | 0 | 0.571429 | 0 | 0 | 0.017928 | 0 | 0 | 0 | 0 | 0 | 0.428571 | 1 | 0.119048 | false | 0 | 0.047619 | 0 | 0.166667 | 0
filter flags (41 values, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 7
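An editorial note on the row above: the tests target the unmaintained nose framework and a pre-2.0 networkx API (G.degree().values(), nx.is_valid_degree_sequence). A rough pytest-style port of the first case, assuming networkx >= 2.0, where the check is nx.is_graphical:

```python
import networkx as nx

def test_valid_degree_sequence_erdos_renyi():
    # networkx >= 2.0: degree() yields (node, degree) pairs, and the
    # Erdos-Gallai ("eg") / Havel-Hakimi ("hh") checks are nx.is_graphical.
    G = nx.erdos_renyi_graph(100, 0.3)
    deg = [d for _, d in G.degree()]
    assert nx.is_graphical(deg, method="eg")
    assert nx.is_graphical(deg, method="hh")
```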
Row 6
hexsha: 4e3a2c28b52894460e5e4e01fa8a0952116c9c5e | size: 3,205 | ext: py | lang: Python
max_stars / max_issues / max_forks: arackodu1.py in IMekatronik/OFFICE-BOY1 @ 4feee0a9823aa9fd97946484e991f628c50a915f, licenses ["MIT"], counts null, events null → null
content:
```python
from gpiozero import Button, LED
from time import sleep
mr_2 = LED(17)
mr_1 = LED(27)
mr_e = LED(22)
ml_2 = LED(18)
ml_1 = LED(23)
ml_e = LED(24)
button_1 = Button(5)
button_2 = Button(6)
button_3 = Button(13)
button_4 = Button(19)
button_5 = Button(26)
button_6 = Button(12)
button_7 = Button(16)
while True:
mr_1.on()
mr_2.off()
mr_e.off()
ml_1.on()
ml_2.off()
ml_e.off()
if button_4.is_pressed == 1 and button_5.is_pressed == 1:
while not button_6.is_pressed == 1 or not button_7.is_pressed == 1:
if button_1.is_pressed and not button_2.is_pressed and not button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.off()
ml_1.on()
ml_2.off()
ml_e.on()
if not button_1.is_pressed and button_2.is_pressed and not button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.on()
ml_1.on()
ml_2.off()
ml_e.on()
if not button_1.is_pressed and not button_2.is_pressed and button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.on()
ml_1.on()
ml_2.off()
ml_e.off()
if button_4.is_pressed == 0 and button_5.is_pressed == 1:
while not button_6.is_pressed == 0 or not button_7.is_pressed == 1:
if button_1.is_pressed and not button_2.is_pressed and not button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.off()
ml_1.on()
ml_2.off()
ml_e.on()
if not button_1.is_pressed and button_2.is_pressed and not button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.on()
ml_1.on()
ml_2.off()
ml_e.on()
if not button_1.is_pressed and not button_2.is_pressed and button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.on()
ml_1.on()
ml_2.off()
ml_e.off()
if button_4.is_pressed == 1 and button_5.is_pressed == 0:
while not button_6.is_pressed == 1 or not button_7.is_pressed == 0:
if button_1.is_pressed and not button_2.is_pressed and not button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.off()
ml_1.on()
ml_2.off()
ml_e.on()
if not button_1.is_pressed and button_2.is_pressed and not button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.on()
ml_1.on()
ml_2.off()
ml_e.on()
if not button_1.is_pressed and not button_2.is_pressed and button_3.is_pressed:
mr_1.on()
mr_2.off()
mr_e.on()
ml_1.on()
ml_2.off()
ml_e.off()
```
avg_line_length: 27.869565 | max_line_length: 92 | alphanum_fraction: 0.468331
quality signals (41 values, schema order): 474 | 3,205 | 2.848101 | 0.082278 | 0.26 | 0.16 | 0.133333 | 0.844444 | 0.844444 | 0.844444 | 0.844444 | 0.844444 | 0.844444 | 0 | 0.07 | 0.438378 | 3,205 | 115 | 93 | 27.869565 | 0.68 | 0 | 0 | 0.758242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.021978 | 0 | 0.021978 | 0
filter flags (41 values, schema order): 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 8
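An editorial note on the row above: each of the three while-loops repeats the same six motor calls per button combination. A hedged refactor sketch reusing the gpiozero objects defined in the script (the helper and table names are mine):

```python
# Sketch: factor the repeated on/off sequences out of the loops.
def drive(mr_enable, ml_enable):
    # Common to every branch: both motors driven forward.
    mr_1.on(); mr_2.off()
    ml_1.on(); ml_2.off()
    (mr_e.on if mr_enable else mr_e.off)()
    (ml_e.on if ml_enable else ml_e.off)()

# Enable-pin pattern per (button_1, button_2, button_3) state,
# mirroring the three if-branches inside each loop above.
PATTERNS = {
    (True, False, False): (False, True),
    (False, True, False): (True, True),
    (False, False, True): (True, False),
}

def step():
    key = (bool(button_1.is_pressed), bool(button_2.is_pressed),
           bool(button_3.is_pressed))
    if key in PATTERNS:
        drive(*PATTERNS[key])
```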
Row 7
hexsha: 9d65ffd42ceb90dd769072f383041240207a07d2 | size: 33 | ext: py | lang: Python
max_stars: gputools/deconv/__init__.py in gmazzamuto/gputools @ 73a4dee76a119f94d8163781a85b691fd080d506, licenses ["BSD-3-Clause"], count 89, events 2015-08-28T14:17:33.000Z → 2022-01-20T16:19:34.000Z
max_issues: same path/repo/sha/licenses, count 24, events 2015-08-28T19:06:22.000Z → 2022-02-21T21:10:13.000Z
max_forks: same path/repo/sha/licenses, count 17, events 2015-08-28T18:56:43.000Z → 2021-09-15T23:15:36.000Z
content:
```python
from .deconv_rl import deconv_rl
```
avg_line_length: 16.5 | max_line_length: 32 | alphanum_fraction: 0.848485
quality signals (41 values, schema order): 6 | 33 | 4.333333 | 0.666667 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 33 | 1 | 33 | 33 | 0.896552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
filter flags (41 values, schema order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 7
Row 8
hexsha: 9d8ec0b8bd732c6af67a276c249bdeb5b7383efe | size: 29,455 | ext: py | lang: Python
max_stars: annotation_scripts/annotate_synthetic_YCBV_train.py in sThalham/UDAPose @ d23edcce18f6e8aec4f35e0894b676d4ae19686e, licenses ["Apache-2.0"], count 2, events 2020-12-03T03:02:45.000Z → 2022-01-13T17:50:41.000Z
max_issues: same path/repo/sha/licenses, count 2, events 2021-02-08T16:14:40.000Z → 2021-03-25T09:04:09.000Z
max_forks: same path/repo/sha/licenses, count null, events null → null
content:
```python
import os
import yaml
import cv2
import numpy as np
import datetime
import copy
import transforms3d as tf3d
import time
import random
import json
import math
import OpenEXR, Imath
from pathlib import Path
from misc import manipulate_RGB, toPix_array, toPix
from Augmentations import augmentDepth, maskDepth, augmentRGB, augmentAAEext, augmentRGB_V2, augmentRGB_V3, get_normal
def get_cont_sympose(rot_pose, sym):
cam_in_obj = np.dot(np.linalg.inv(rot_pose), (0, 0, 0, 1))
alpha = math.atan2(cam_in_obj[1], cam_in_obj[0])
rot_pose[:3, :3] = np.dot(rot_pose[:3, :3], tf3d.euler.euler2mat(0.0, 0.0, alpha, 'sxyz'))
return rot_pose
def get_disc_sympose(rot_pose, sym, oid):
if len(sym) > 3:
sym = np.array(sym, dtype=np.float32)
if sym[0, 0] == 1:
c_alpha = np.dot([0, 1, 0], np.dot(rot_pose[0:3, 0:3], [0, 1, 0]))
if c_alpha < 0:
rot_pose_new = np.dot(rot_pose, sym)
else:
rot_pose_new = rot_pose
if sym[1, 1] == 1:
c_alpha = np.dot([1, 0, 0], np.dot(rot_pose[0:3, 0:3], [1, 0, 0]))
if c_alpha < 0:
rot_pose_new = np.dot(rot_pose, sym)
else:
rot_pose_new = rot_pose
if sym[2, 2] == 1:
c_alpha = np.dot([1, 0, 0], np.dot(rot_pose[0:3, 0:3], [1, 0, 0]))
if c_alpha < 0:
rot_pose_new = np.dot(rot_pose, sym)
else:
rot_pose_new = rot_pose
else:
rot_pose_new = rot_pose
else:
rot_pose1 = np.dot(rot_pose, sym[0])
rot_pose2 = np.dot(rot_pose, sym[1])
rot_pose3 = np.dot(rot_pose, sym[2])
alpha_0 = np.dot([1, 0, 0], np.dot(rot_pose[0:3, 0:3], [1, 0, 0]))
alpha_1 = np.dot([1, 0, 0], np.dot(rot_pose1[0:3, 0:3], [1, 0, 0]))
alpha_2 = np.dot([1, 0, 0], np.dot(rot_pose2[0:3, 0:3], [1, 0, 0]))
alpha_3 = np.dot([1, 0, 0], np.dot(rot_pose3[0:3, 0:3], [1, 0, 0]))
if alpha_1 < alpha_0 and alpha_1 < alpha_2 and alpha_1 < alpha_3:
rot_pose_new = rot_pose1
elif alpha_2 < alpha_0 and alpha_2 < alpha_1 and alpha_2 < alpha_3:
rot_pose_new = rot_pose2
elif alpha_3 < alpha_0 and alpha_3 < alpha_1 and alpha_3 < alpha_1:
rot_pose_new = rot_pose3
else:
rot_pose_new = rot_pose
return rot_pose_new
if __name__ == "__main__":
root = '/home/stefan/data/rendered_data/ycbv_rgbd/patches'
root2 = '/home/stefan/data/rendered_data/ycbv_rgbd_2/patches'
target = '/home/stefan/data/train_data/ycbv_RGBD_V2/'
mesh_info = '/home/stefan/data/Meshes/ycb_video_st/models/models_info.json'
visu = False
resX = 640
resY = 480
fxkin = 579.68 # blender calculated
fykin = 542.31 # blender calculated
cxkin = 320
cykin = 240
depthCut = 2000
threeD_boxes = np.ndarray((22, 8, 3), dtype=np.float32)
sym_cont = np.ndarray((22, 3), dtype=np.float32)
sym_disc = np.ndarray((28, 4, 4), dtype=np.float32)
for key, value in json.load(open(mesh_info)).items():
fac = 0.001
x_minus = value['min_x'] * fac
y_minus = value['min_y'] * fac
z_minus = value['min_z'] * fac
x_plus = value['size_x'] * fac + x_minus
y_plus = value['size_y'] * fac + y_minus
z_plus = value['size_z'] * fac + z_minus
three_box_solo = np.array([[x_plus, y_plus, z_plus],
[x_plus, y_plus, z_minus],
[x_plus, y_minus, z_minus],
[x_plus, y_minus, z_plus],
[x_minus, y_plus, z_plus],
[x_minus, y_plus, z_minus],
[x_minus, y_minus, z_minus],
[x_minus, y_minus, z_plus]])
threeD_boxes[int(key), :, :] = three_box_solo
if "symmetries_continuous" in value:
sym_cont[int(key), :] = np.asarray(value['symmetries_continuous'][0]['axis'], dtype=np.float32)
elif "symmetries_discrete" in value:
syms = value['symmetries_discrete']
#Obj 16
if len(syms) > 1:
sym_disc[int(key), :, :] = np.asarray(syms[0], dtype=np.float32).reshape((4, 4))
sym_disc[22, :, :] = np.asarray(syms[1], dtype=np.float32).reshape((4, 4))
sym_disc[23, :, :] = np.asarray(syms[2], dtype=np.float32).reshape((4, 4))
sym_disc[24, :, :] = np.asarray(syms[3], dtype=np.float32).reshape((4, 4))
sym_disc[25, :, :] = np.asarray(syms[4], dtype=np.float32).reshape((4, 4))
sym_disc[26, :, :] = np.asarray(syms[5], dtype=np.float32).reshape((4, 4))
sym_disc[27, :, :] = np.asarray(syms[6], dtype=np.float32).reshape((4, 4))
else:
sym_disc[int(key), :, :] = np.asarray(syms[0], dtype=np.float32).reshape((4,4))
else:
pass
now = datetime.datetime.now()
dateT = str(now)
dict = {"info": {
"description": "tless",
"url": "cmp.felk.cvut.cz/t-less/",
"version": "1.0",
"year": 2018,
"contributor": "Stefan Thalhammer",
"date_created": dateT
},
"licenses": [],
"images": [],
"annotations": [],
"categories": []
}
dictVal = copy.deepcopy(dict)
annoID = 0
gloCo = 0
times = []
trainN = 1
testN = 1
valN = 1
depPath = root + "/depth/"
partPath = root + "/part/"
gtPath = root
maskPath = root + "/mask/"
rgbPath = root + "/rgb/"
excludedImgs = []
boxWidths = []
boxHeights = []
meanRGBD = np.zeros((6), np.float64)
syns = os.listdir(root)
syns2 = os.listdir(root2)
all = len(syns) + len(syns2)
for fileInd in syns:
if fileInd.endswith(".yaml"):
start_time = time.time()
gloCo = gloCo + 1
redname = fileInd[:-8]
gtfile = gtPath + '/' + fileInd
depfile = depPath + redname + "_depth.exr"
partfile = partPath + redname + "_part.png"
maskfile = maskPath + redname + "_mask.npy"
rgbfile = rgbPath + redname + "_rgb.png"
depth_refine, rgb_refine, mask, bboxes, poses, mask_ids, visibilities = manipulate_RGB(gtfile, depfile, partfile, rgbfile)
try:
obj_mask = np.load(maskfile)
except Exception:
continue
obj_mask = obj_mask.astype(np.int8)
if bboxes is None:
excludedImgs.append(int(redname))
continue
depth_refine = np.multiply(depth_refine, 1000.0) # to millimeters
rows, cols = depth_refine.shape
for k in range(0, 1):
newredname = redname[1:] + str(k)
fileName = target + "images/train/" + newredname + '_rgb.jpg'
myFile = Path(fileName)
print(myFile)
if myFile.exists():
print('File exists, skip encoding and saving.')
else:
depthAug = maskDepth(depth_refine, obj_mask, mask)
rgbAug = rgb_refine
depthAug[depthAug > depthCut] = 0
aug_dep = depthAug.astype(np.uint16)
meanRGBD[0] += np.nanmean(rgbAug[:, :, 0])
meanRGBD[1] += np.nanmean(rgbAug[:, :, 1])
meanRGBD[2] += np.nanmean(rgbAug[:, :, 2])
meanRGBD[3] += np.nanmean(aug_dep[:, :])
meanRGBD[4] += np.nanmean(aug_dep[:, :])
meanRGBD[5] += np.nanmean(aug_dep[:, :])
cv2.imwrite(fileName, rgbAug)
cv2.imwrite(fileName[:-8] + '_dep.png', aug_dep)
imgID = int(newredname)
imgName = newredname + '.jpg'
# print(imgName)
# bb scaling because of image scaling
bbvis = []
bb3vis = []
cats = []
posvis = []
postra = []
# for i, bbox in enumerate(bboxes[:-1]):
for i, bbox in enumerate(bboxes[:-1]):
if visibilities[i] < 0.5:
# print('visibility: ', visibilities[i], ' skip!')
continue
#print(visibilities[i])
#if (np.asscalar(bbox[0]) + 1) > 13:
# continue
bbvis.append(bbox.astype(int))
objID = np.asscalar(bbox[0]) + 1
#objID = np.asscalar(bboxes[i+1][0]) + 1
cats.append(objID)
bbox = (bbox).astype(int)
#rot = tf3d.quaternions.quat2mat(poses[i, 3:])
#rot = np.asarray(rot, dtype=np.float32)
rot = tf3d.quaternions.quat2mat(poses[i, 3:])
tra = poses[i, 0:3]
pose = np.zeros((4, 4), dtype=np.float32)
pose[:3, :3] = rot
pose[:3, 3] = tra
pose[3, 3] = 1
if objID in [13, 18]:
rot = get_cont_sympose(pose, sym_cont[objID, :])
elif objID in [1, 19, 20, 21]:
rot = get_disc_sympose(pose, sym_disc[objID, :, :], objID)
#elif objID == 16:
# rot = get_disc_sympose(pose, [sym_disc[16, :, :], sym_disc[22, :, :], sym_disc[23, :, :], sym_disc[24, :, :], sym_disc[25, :, :], sym_disc[26, :, :], sym_disc[27, :, :]],
# objID)
rot = np.asarray(rot, dtype=np.float32)
tDbox = rot[:3, :3].dot(threeD_boxes[objID, :, :].T).T
tDbox = tDbox + np.repeat(poses[i, np.newaxis, 0:3], 8, axis=0)
# if objID == 10 or objID == 11:
# print(tf3d.euler.quat2euler(poses[i, 3:]))
box3D = toPix_array(tDbox, fx=fxkin, fy=fykin, cx=cxkin, cy=cykin)
box3D = np.reshape(box3D, (16))
box3D = box3D.tolist()
bb3vis.append(box3D)
bbox = bbox.astype(int)
x1 = np.asscalar(bbox[2])
y1 = np.asscalar(bbox[1])
x2 = np.asscalar(bbox[4])
y2 = np.asscalar(bbox[3])
nx1 = bbox[2]
ny1 = bbox[1]
nx2 = bbox[4]
ny2 = bbox[3]
w = (x2 - x1)
h = (y2 - y1)
boxWidths.append(w)
boxHeights.append(h)
bb = [x1, y1, w, h]
area = w * h
npseg = np.array([nx1, ny1, nx2, ny1, nx2, ny2, nx1, ny2])
seg = npseg.tolist()
pose = [np.asscalar(poses[i, 0]), np.asscalar(poses[i, 1]), np.asscalar(poses[i, 2]),
np.asscalar(poses[i, 3]), np.asscalar(poses[i, 4]), np.asscalar(poses[i, 5]),
np.asscalar(poses[i, 6])]
if i != len(bboxes):
pose[0:2] = toPix(pose[0:3], fx=fxkin, fy=fykin, cx=cxkin, cy=cykin)
posvis.append(pose)
tra = np.asarray(poses[i, :3], dtype=np.float32)
postra.append(tra)
annoID = annoID + 1
tempTA = {
"id": annoID,
"image_id": imgID,
"category_id": objID,
"bbox": bb,
"pose": pose,
"segmentation": box3D,
"area": area,
"iscrowd": 0,
# "feature_visibility": feat_vis
}
# print('norm q: ', np.linalg.norm(pose[3:]))
dict["annotations"].append(tempTA)
tempTL = {
"url": "cmp.felk.cvut.cz/t-less/",
"id": imgID,
"name": imgName
}
dict["licenses"].append(tempTL)
tempTV = {
"license": 2,
"url": "cmp.felk.cvut.cz/t-less/",
"file_name": imgName,
"height": resY,
"width": resX,
"date_captured": dateT,
"id": imgID
}
dict["images"].append(tempTV)
gloCo += 1
elapsed_time = time.time() - start_time
times.append(elapsed_time)
meantime = sum(times) / len(times)
eta = ((all - gloCo) * meantime) / 60
if gloCo % 100 == 0:
print('eta: ', eta, ' min')
times = []
if visu is True:
img = rgbAug
for i, bb in enumerate(bbvis):
# if cats[i] not in [19, 20, 23]:
# continue
bb = np.array(bb)
cv2.rectangle(img, (int(bb[2]), int(bb[1])), (int(bb[4]), int(bb[3])),
(255, 255, 255), 2)
cv2.rectangle(img, (int(bb[2]), int(bb[1])), (int(bb[4]), int(bb[3])),
(0, 0, 0), 1)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (int(bb[2]), int(bb[1]))
fontScale = 1
fontColor = (0, 0, 0)
fontthickness = 1
lineType = 2
gtText = str(cats[i])
# print(cats[i])
fontColor2 = (255, 255, 255)
fontthickness2 = 3
cv2.putText(img, gtText,
bottomLeftCornerOfText,
font,
fontScale,
fontColor2,
fontthickness2,
lineType)
cv2.putText(img, gtText,
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
fontthickness,
lineType)
# print(posvis[i])
if i != poses.shape[0]:
pose = np.asarray(bb3vis[i], dtype=np.float32)
print(pose)
colR = 250
colG = 25
colB = 175
img = cv2.line(img, tuple(pose[0:2].ravel()), tuple(pose[2:4].ravel()), (130, 245, 13), 2)
img = cv2.line(img, tuple(pose[2:4].ravel()), tuple(pose[4:6].ravel()), (50, 112, 220), 2)
img = cv2.line(img, tuple(pose[4:6].ravel()), tuple(pose[6:8].ravel()), (50, 112, 220), 2)
img = cv2.line(img, tuple(pose[6:8].ravel()), tuple(pose[0:2].ravel()), (50, 112, 220), 2)
img = cv2.line(img, tuple(pose[0:2].ravel()), tuple(pose[8:10].ravel()), (colR, colG, colB),
2)
img = cv2.line(img, tuple(pose[2:4].ravel()), tuple(pose[10:12].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[4:6].ravel()), tuple(pose[12:14].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[6:8].ravel()), tuple(pose[14:16].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[8:10].ravel()), tuple(pose[10:12].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[10:12].ravel()), tuple(pose[12:14].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[12:14].ravel()), tuple(pose[14:16].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[14:16].ravel()), tuple(pose[8:10].ravel()),
(colR, colG, colB), 2)
cv2.imwrite(fileName, img)
print('STOP')
for fileInd in syns2:
if fileInd.endswith(".yaml"):
start_time = time.time()
gloCo = gloCo + 1
redname = fileInd[:-8]
gtfile = gtPath + '/' + fileInd
depfile = depPath + redname + "_depth.exr"
partfile = partPath + redname + "_part.png"
maskfile = maskPath + redname + "_mask.npy"
rgbfile = rgbPath + redname + "_rgb.png"
depth_refine, rgb_refine, mask, bboxes, poses, mask_ids, visibilities = manipulate_RGB(gtfile, depfile, partfile, rgbfile)
try:
obj_mask = np.load(maskfile)
except Exception:
continue
obj_mask = obj_mask.astype(np.int8)
if bboxes is None:
excludedImgs.append(int(redname))
continue
depth_refine = np.multiply(depth_refine, 1000.0) # to millimeters
rows, cols = depth_refine.shape
for k in range(0, 1):
newredname = str(2) + redname[1:] + str(k)
fileName = target + "images/train/" + newredname + '_rgb.jpg'
myFile = Path(fileName)
print(myFile)
if myFile.exists():
print('File exists, skip encoding and saving.')
else:
depthAug = maskDepth(depth_refine, obj_mask, mask)
rgbAug = rgb_refine
depthAug[depthAug > depthCut] = 0
aug_dep = depthAug.astype(np.uint16)
meanRGBD[0] += np.nanmean(rgbAug[:, :, 0])
meanRGBD[1] += np.nanmean(rgbAug[:, :, 1])
meanRGBD[2] += np.nanmean(rgbAug[:, :, 2])
meanRGBD[3] += np.nanmean(aug_dep[:, :])
meanRGBD[4] += np.nanmean(aug_dep[:, :])
meanRGBD[5] += np.nanmean(aug_dep[:, :])
cv2.imwrite(fileName, rgbAug)
cv2.imwrite(fileName[:-8] + '_dep.png', aug_dep)
imgID = int(newredname)
imgName = newredname + '.jpg'
# print(imgName)
# bb scaling because of image scaling
bbvis = []
bb3vis = []
cats = []
posvis = []
postra = []
# for i, bbox in enumerate(bboxes[:-1]):
for i, bbox in enumerate(bboxes[:-1]):
if visibilities[i] < 0.5:
# print('visibility: ', visibilities[i], ' skip!')
continue
#print(visibilities[i])
#if (np.asscalar(bbox[0]) + 1) > 13:
# continue
bbvis.append(bbox.astype(int))
objID = np.asscalar(bbox[0]) + 1
#objID = np.asscalar(bboxes[i+1][0]) + 1
cats.append(objID)
bbox = (bbox).astype(int)
#rot = tf3d.quaternions.quat2mat(poses[i, 3:])
#rot = np.asarray(rot, dtype=np.float32)
rot = tf3d.quaternions.quat2mat(poses[i, 3:])
tra = poses[i, 0:3]
pose = np.zeros((4, 4), dtype=np.float32)
pose[:3, :3] = rot
pose[:3, 3] = tra
pose[3, 3] = 1
if objID in [13, 18]:
rot = get_cont_sympose(pose, sym_cont[objID, :])
elif objID in [1, 19, 20, 21]:
rot = get_disc_sympose(pose, sym_disc[objID, :, :], objID)
# elif objID == 16:
# rot = get_disc_sympose(pose, [sym_disc[16, :, :], sym_disc[22, :, :], sym_disc[23, :, :], sym_disc[24, :, :], sym_disc[25, :, :], sym_disc[26, :, :], sym_disc[27, :, :]],
# objID)
rot = np.asarray(rot, dtype=np.float32)
cls = objID
tDbox = rot[:3, :3].dot(threeD_boxes[objID, :, :].T).T
tDbox = tDbox + np.repeat(poses[i, np.newaxis, 0:3], 8, axis=0)
# if objID == 10 or objID == 11:
# print(tf3d.euler.quat2euler(poses[i, 3:]))
box3D = toPix_array(tDbox, fx=fxkin, fy=fykin, cx=cxkin, cy=cykin)
box3D = np.reshape(box3D, (16))
box3D = box3D.tolist()
bb3vis.append(box3D)
bbox = bbox.astype(int)
x1 = np.asscalar(bbox[2])
y1 = np.asscalar(bbox[1])
x2 = np.asscalar(bbox[4])
y2 = np.asscalar(bbox[3])
nx1 = bbox[2]
ny1 = bbox[1]
nx2 = bbox[4]
ny2 = bbox[3]
w = (x2 - x1)
h = (y2 - y1)
boxWidths.append(w)
boxHeights.append(h)
bb = [x1, y1, w, h]
area = w * h
npseg = np.array([nx1, ny1, nx2, ny1, nx2, ny2, nx1, ny2])
seg = npseg.tolist()
pose = [np.asscalar(poses[i, 0]), np.asscalar(poses[i, 1]), np.asscalar(poses[i, 2]),
np.asscalar(poses[i, 3]), np.asscalar(poses[i, 4]), np.asscalar(poses[i, 5]),
np.asscalar(poses[i, 6])]
if i != len(bboxes):
pose[0:2] = toPix(pose[0:3], fx=fxkin, fy=fykin, cx=cxkin, cy=cykin)
posvis.append(pose)
tra = np.asarray(poses[i, :3], dtype=np.float32)
postra.append(tra)
annoID = annoID + 1
tempTA = {
"id": annoID,
"image_id": imgID,
"category_id": objID,
"bbox": bb,
"pose": pose,
"segmentation": box3D,
"area": area,
"iscrowd": 0,
# "feature_visibility": feat_vis
}
# print('norm q: ', np.linalg.norm(pose[3:]))
dict["annotations"].append(tempTA)
tempTL = {
"url": "cmp.felk.cvut.cz/t-less/",
"id": imgID,
"name": imgName
}
dict["licenses"].append(tempTL)
tempTV = {
"license": 2,
"url": "cmp.felk.cvut.cz/t-less/",
"file_name": imgName,
"height": resY,
"width": resX,
"date_captured": dateT,
"id": imgID
}
dict["images"].append(tempTV)
gloCo += 1
elapsed_time = time.time() - start_time
times.append(elapsed_time)
meantime = sum(times) / len(times)
eta = ((all - gloCo) * meantime) / 60
if gloCo % 100 == 0:
print('eta: ', eta, ' min')
times = []
if visu is True:
img = rgbAug
for i, bb in enumerate(bbvis):
# if cats[i] not in [19, 20, 23]:
# continue
bb = np.array(bb)
cv2.rectangle(img, (int(bb[2]), int(bb[1])), (int(bb[4]), int(bb[3])),
(255, 255, 255), 2)
cv2.rectangle(img, (int(bb[2]), int(bb[1])), (int(bb[4]), int(bb[3])),
(0, 0, 0), 1)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (int(bb[2]), int(bb[1]))
fontScale = 1
fontColor = (0, 0, 0)
fontthickness = 1
lineType = 2
gtText = str(cats[i])
# print(cats[i])
fontColor2 = (255, 255, 255)
fontthickness2 = 3
cv2.putText(img, gtText,
bottomLeftCornerOfText,
font,
fontScale,
fontColor2,
fontthickness2,
lineType)
cv2.putText(img, gtText,
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
fontthickness,
lineType)
# print(posvis[i])
if i != poses.shape[0]:
pose = np.asarray(bb3vis[i], dtype=np.float32)
print(pose)
colR = 250
colG = 25
colB = 175
img = cv2.line(img, tuple(pose[0:2].ravel()), tuple(pose[2:4].ravel()), (130, 245, 13), 2)
img = cv2.line(img, tuple(pose[2:4].ravel()), tuple(pose[4:6].ravel()), (50, 112, 220), 2)
img = cv2.line(img, tuple(pose[4:6].ravel()), tuple(pose[6:8].ravel()), (50, 112, 220), 2)
img = cv2.line(img, tuple(pose[6:8].ravel()), tuple(pose[0:2].ravel()), (50, 112, 220), 2)
img = cv2.line(img, tuple(pose[0:2].ravel()), tuple(pose[8:10].ravel()), (colR, colG, colB),
2)
img = cv2.line(img, tuple(pose[2:4].ravel()), tuple(pose[10:12].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[4:6].ravel()), tuple(pose[12:14].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[6:8].ravel()), tuple(pose[14:16].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[8:10].ravel()), tuple(pose[10:12].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[10:12].ravel()), tuple(pose[12:14].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[12:14].ravel()), tuple(pose[14:16].ravel()),
(colR, colG, colB), 2)
img = cv2.line(img, tuple(pose[14:16].ravel()), tuple(pose[8:10].ravel()),
(colR, colG, colB), 2)
cv2.imwrite(fileName, img)
print('STOP')
catsInt = range(1, 22)
for s in catsInt:
objName = str(s)
tempC = {
"id": s,
"name": objName,
"supercategory": "object"
}
dict["categories"].append(tempC)
traAnno = target + "annotations/instances_train.json"
with open(traAnno, 'w') as fpT:
json.dump(dict, fpT)
excludedImgs.sort()
print('excluded images: ')
for ex in excludedImgs:
print(ex)
all_rendered = len(os.listdir(target + "images/train/")) * 0.5  # two files (RGB + depth) are written per sample
means = meanRGBD / all_rendered
print('means: ', means)
print('Chill for once in your life... everything\'s done')
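For reference, toPix and toPix_array (called above with the Kinect intrinsics fxkin, fykin, cxkin, cykin) are presumably standard pinhole projections; a minimal sketch of the assumed behaviour, not the script's actual helpers:

import numpy as np

def toPix_array_sketch(points, fx, fy, cx, cy):
    # points: (N, 3) camera-space coordinates; returns (N, 2) pixel coordinates
    # via u = fx * X / Z + cx, v = fy * Y / Z + cy.
    points = np.asarray(points, dtype=np.float64)
    u = fx * points[:, 0] / points[:, 2] + cx
    v = fy * points[:, 1] / points[:, 2] + cy
    return np.stack([u, v], axis=1)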
| 40.129428
| 195
| 0.420268
| 3,105
| 29,455
| 3.892432
| 0.125282
| 0.035744
| 0.019858
| 0.025815
| 0.79439
| 0.77718
| 0.772299
| 0.753847
| 0.731425
| 0.730018
| 0
| 0.064141
| 0.447938
| 29,455
| 733
| 196
| 40.184175
| 0.67911
| 0
| 0
| 0.72711
| 0
| 0
| 0.049519
| 0.018835
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.001795
| 0.02693
| null | null | 0.025135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9d9f32a3082b6602ec52717c309388735777bb65
| 20,861
|
py
|
Python
|
chess_rules.py
|
cakesterman/Chess-Bot
|
9c0d2d5c4636ae1bee9ac8e3ec7408b076fb0c93
|
[
"MIT"
] | null | null | null |
chess_rules.py
|
cakesterman/Chess-Bot
|
9c0d2d5c4636ae1bee9ac8e3ec7408b076fb0c93
|
[
"MIT"
] | null | null | null |
chess_rules.py
|
cakesterman/Chess-Bot
|
9c0d2d5c4636ae1bee9ac8e3ec7408b076fb0c93
|
[
"MIT"
] | null | null | null |
def check_valid_move(game_piece, current_pos, pos_to_move, chess_board):
#rules = {"Rook": }
# print(game_piece)
if game_piece == "Black Rook" or game_piece == "White Rook":
moves, captures = rook_calculate_all_possible_moves(current_pos, chess_board)
if pos_to_move in moves:
return True, moves, captures
elif pos_to_move in captures:
# print("Capturing")
return True, moves, captures
return False, moves, captures
elif game_piece == "Black Knight" or game_piece == "White Knight":
moves, captures = knight_calculate_all_possible_moves(current_pos, chess_board)
#print(f"Can capture {captures}")
if pos_to_move in moves:
return True, moves, captures
elif pos_to_move in captures:
# print("Capturing")
return True, moves, captures
else:
return False, moves, captures
elif game_piece == "Black Bishop" or game_piece == "White Bishop":
moves, captures = bishop_calculate_all_possible_moves(current_pos, chess_board)
if pos_to_move in moves:
return True, moves, captures
elif pos_to_move in captures:
# print("Capturing")
return True, moves, captures
else:
return False, moves, captures
#print(bishop_calculate_all_possible_moves(current_pos, chess_board))
elif game_piece == "Black Queen" or game_piece == "White Queen":
moves, captures = queen_calculate_all_possible_moves(current_pos, chess_board)
if pos_to_move in moves:
return True, moves, captures
elif pos_to_move in captures:
return True, moves, captures
else:
return False, moves, captures
elif game_piece == "Black King" or game_piece == "White King":
moves, captures = king_calculate_all_possible_moves(current_pos, chess_board)
if pos_to_move in moves:
return True, moves, captures
elif pos_to_move in captures:
return True, moves, captures
else:
return False, moves, captures
elif game_piece == "Black Pawn" or game_piece == "White Pawn":
moves, captures = pawn_calculate_all_possible_moves(current_pos, chess_board)
if pos_to_move in moves:
chess_board[current_pos].set_first_move_false()
return True, moves, captures
elif pos_to_move in captures:
chess_board[current_pos].set_first_move_false()
return True, moves, captures
else:
return False, moves, captures
# Temporary else for testing and moving all other game pieces; return empty
# move/capture lists so callers can always unpack a 3-tuple.
else:
    return True, [], []
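The commented-out rules dict at the top of check_valid_move hints at a table-driven refactor; a minimal sketch under that assumption (it would have to sit after the calculator functions below are defined, and it omits the pawn's first-move bookkeeping):

PIECE_MOVE_CALCULATORS = {
    "Rook": rook_calculate_all_possible_moves,
    "Knight": knight_calculate_all_possible_moves,
    "Bishop": bishop_calculate_all_possible_moves,
    "Queen": queen_calculate_all_possible_moves,
    "King": king_calculate_all_possible_moves,
    "Pawn": pawn_calculate_all_possible_moves,
}

def check_valid_move_sketch(game_piece, current_pos, pos_to_move, chess_board):
    # "Black Rook" and "White Rook" both map to the "Rook" calculator.
    kind = game_piece.split(" ", 1)[1]
    moves, captures = PIECE_MOVE_CALCULATORS[kind](current_pos, chess_board)
    return (pos_to_move in moves or pos_to_move in captures), moves, captures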
def rook_calculate_all_possible_moves(current_pos, chess_board):
player = chess_board.get(current_pos).get_player_side()
all_possible_moves = []
all_possible_captures = []
x = current_pos[0]
y = current_pos[1]
# Rook movement: scan outward along the rank and file until blocked.
# Check x axis going left (column 0 included)
for index in range(x - 1, -1, -1):
    # If the position is empty, add to all_possible_moves
    if chess_board.get((index, y)) is None:
        all_possible_moves.append((index, y))
    # If the position holds an enemy piece, add to all_possible_captures and stop
    elif chess_board.get((index, y)).get_player_side() != player:
        all_possible_captures.append((index, y))
        break
    else:
        break
# Check x axis going right (column 7 included)
for index in range(x + 1, 8):
    if chess_board.get((index, y)) is None:
        all_possible_moves.append((index, y))
    elif chess_board.get((index, y)).get_player_side() != player:
        all_possible_captures.append((index, y))
        break
    else:
        break
# Check y axis going up (row 0 included)
for index in range(y - 1, -1, -1):
    if chess_board.get((x, index)) is None:
        all_possible_moves.append((x, index))
    elif chess_board.get((x, index)).get_player_side() != player:
        all_possible_captures.append((x, index))
        break
    else:
        break
# Check y axis going down (row 7 included)
for index in range(y + 1, 8):
    if chess_board.get((x, index)) is None:
        all_possible_moves.append((x, index))
    elif chess_board.get((x, index)).get_player_side() != player:
        all_possible_captures.append((x, index))
        break
    else:
        break
return all_possible_moves, all_possible_captures
def knight_calculate_all_possible_moves(current_pos, chess_board):
player = chess_board.get(current_pos).get_player_side()
all_possible_moves = []
all_possible_captures = []
def check_bounds_and_chess_board(x, y):
if 0 <= x < 8 and 0 <= y < 8:
if chess_board.get((x, y)) is None:
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
all_possible_captures.append((x, y))
# The knight has at most eight destinations; probe each (dx, dy) offset
# directly instead of scanning the full board.
knight_offsets = [(-1, 2), (1, 2), (-1, -2), (1, -2),
                  (-2, 1), (-2, -1), (2, 1), (2, -1)]
for dx, dy in knight_offsets:
    check_bounds_and_chess_board(current_pos[0] + dx, current_pos[1] + dy)
# print(all_possible_captures)
return all_possible_moves, all_possible_captures
def bishop_calculate_all_possible_moves(current_pos, chess_board):
player = chess_board.get(current_pos).get_player_side()
all_possible_moves = []
all_possible_captures = []
def search_left_down_diagonally():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
x -= 1
y += 1
if chess_board.get((x, y)) is None:
if x < 0 or y > 7:
possible = False
break
#print("At location ({}, {})".format(x, y))
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if x < 0 or y > 7:
possible = False
break
all_possible_captures.append((x, y))
possible = False
else:
possible = False
def search_right_down_diagonally():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
x += 1
y += 1
# if x == 7 or y == 7:
# possible = False
if chess_board.get((x, y)) is None:
if x > 7 or y > 7:
possible = False
break
#print("At location ({}, {})".format(x, y))
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if x > 7 or y > 7:
possible = False
break
all_possible_captures.append((x, y))
possible = False
else:
possible = False
def search_left_up_diagonally():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
x -= 1
y -= 1
if chess_board.get((x, y)) is None:
if x < 0 or y < 0:
possible = False
break
#print("At location ({}, {})".format(x, y))
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if x < 0 or y < 0:
possible = False
break
all_possible_captures.append((x, y))
possible = False
else:
possible = False
def search_right_up_diagonally():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
x += 1
y -= 1
if chess_board.get((x, y)) is None:
if x > 7 or y < 0:
possible = False
break
#print("At location ({}, {})".format(x, y))
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if x > 7 or y < 0:
possible = False
break
all_possible_captures.append((x, y))
possible = False
else:
possible = False
# Each diagonal search stops on its own at the board edge, so all four
# directions can be probed unconditionally.
search_left_down_diagonally()
search_right_down_diagonally()
search_left_up_diagonally()
search_right_up_diagonally()
return all_possible_moves, all_possible_captures
def queen_calculate_all_possible_moves(current_pos, chess_board):
player = chess_board.get(current_pos).get_player_side()
all_possible_moves = []
all_possible_captures = []
moves, captures = bishop_calculate_all_possible_moves(current_pos, chess_board)
all_possible_moves.extend(moves)
all_possible_captures.extend(captures)
def search_down():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
y += 1
if chess_board.get((x, y)) is None:
if y == 7:
possible = False
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if y == 7:
possible = False
all_possible_captures.append((x, y))
possible = False
else:
possible = False
def search_up():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
y -= 1
if chess_board.get((x, y)) is None:
if y == 0:
possible = False
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if y == 0:
possible = False
all_possible_captures.append((x, y))
possible = False
else:
possible = False
def search_left():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
x -= 1
if chess_board.get((x, y)) is None:
if x == 0:
possible = False
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if x == 0:
possible = False
all_possible_captures.append((x, y))
possible = False
else:
possible = False
def search_right():
x = current_pos[0]
y = current_pos[1]
possible = True
while possible:
x += 1
if chess_board.get((x, y)) is None:
if x == 7:
possible = False
all_possible_moves.append((x, y))
elif chess_board.get((x, y)).get_player_side() != player:
if x == 7:
possible = False
all_possible_captures.append((x, y))
possible = False
else:
possible = False
if current_pos[1] == 0 and 0 < current_pos[0] < 7:
search_down()
search_left()
search_right()
if current_pos[1] == 0 and current_pos[0] == 0:
search_right()
search_down()
if current_pos[1] == 0 and current_pos[0] == 7:
search_left()
search_down()
if current_pos[1] == 7 and 0 < current_pos[0] < 7:
search_up()
search_left()
search_right()
if current_pos[1] == 7 and current_pos[0] == 0:
search_right()
search_up()
if current_pos[1] == 7 and current_pos[0] == 7:
search_left()
search_up()
if 0 < current_pos[1] < 7 and 0 < current_pos[0] < 7:
search_right()
search_left()
search_up()
search_down()
if 0 < current_pos[1] < 7 and current_pos[0] == 0:
    search_up()
    search_right()
    search_down()
if 0 < current_pos[1] < 7 and current_pos[0] == 7:
    search_up()
    search_left()
    search_down()
#print(all_possible_moves)
return all_possible_moves, all_possible_captures
def king_calculate_all_possible_moves(current_pos, chess_board):
player = chess_board.get(current_pos).get_player_side()
all_possible_moves = []
all_possible_captures = []
x = current_pos[0]
y = current_pos[1]
def search_down():
if chess_board.get((x, y + 1)) is None:
all_possible_moves.append((current_pos[0], (current_pos[1] + 1)))
elif chess_board.get((x, y + 1)).get_player_side() != player:
all_possible_captures.append((x, y + 1))
def search_up():
if chess_board.get((x, y - 1)) is None:
all_possible_moves.append((current_pos[0], (current_pos[1] - 1)))
elif chess_board.get((x, y - 1)).get_player_side() != player:
all_possible_captures.append((x, y - 1))
def search_left():
if chess_board.get((x - 1, y)) is None:
all_possible_moves.append((current_pos[0] - 1, current_pos[1]))
elif chess_board.get((x - 1, y)).get_player_side() != player:
all_possible_captures.append((x - 1, y))
def search_right():
if chess_board.get((x + 1, y)) is None:
all_possible_moves.append((current_pos[0] + 1, current_pos[1]))
elif chess_board.get((x + 1, y)).get_player_side() != player:
all_possible_captures.append((x + 1, y))
def search_left_up_diagonally():
if chess_board.get((x - 1, y - 1)) is None:
all_possible_moves.append((current_pos[0] - 1, current_pos[1] - 1))
elif chess_board.get((x - 1, y - 1)).get_player_side() != player:
all_possible_captures.append((x - 1, y - 1))
def search_left_down_diagonally():
if chess_board.get((x - 1, y + 1)) is None:
all_possible_moves.append((current_pos[0] - 1, current_pos[1] + 1))
elif chess_board.get((x - 1, y + 1)).get_player_side() != player:
all_possible_captures.append((x - 1, y + 1))
def search_right_up_diagonally():
if chess_board.get((x + 1, y - 1)) is None:
all_possible_moves.append((current_pos[0] + 1, current_pos[1] - 1))
elif chess_board.get((x + 1, y - 1)).get_player_side() != player:
all_possible_captures.append((x + 1, y - 1))
def search_right_down_diagonally():
if chess_board.get((x + 1, y + 1)) is None:
all_possible_moves.append((current_pos[0] + 1, current_pos[1] + 1))
elif chess_board.get((x + 1, y + 1)).get_player_side() != player:
all_possible_captures.append((x + 1, y + 1))
# y == 0 and 0 < x < 7
if current_pos[1] == 0 and 0 < current_pos[0] < 7:
search_down()
search_left()
search_right()
search_left_down_diagonally()
search_right_down_diagonally()
# y == 0 and x == 0
if current_pos[1] == 0 and current_pos[0] == 0:
search_right()
search_down()
search_right_down_diagonally()
# y == 0 and x == 7
if current_pos[1] == 0 and current_pos[0] == 7:
search_left()
search_down()
search_left_down_diagonally()
# y == 7 and 0 < x < 7
if current_pos[1] == 7 and 0 < current_pos[0] < 7:
search_up()
search_left()
search_right()
search_left_up_diagonally()
search_right_up_diagonally()
# y == 7 and x == 0
if current_pos[1] == 7 and current_pos[0] == 0:
search_right()
search_up()
search_right_up_diagonally()
# y == 7 and x == 7
if current_pos[1] == 7 and current_pos[0] == 7:
search_left()
search_up()
search_left_up_diagonally()
if 0 < current_pos[1] < 7 and current_pos[0] == 0:
search_up()
search_right()
search_down()
search_right_down_diagonally()
search_right_up_diagonally()
if 0 < current_pos[1] < 7 and current_pos[0] == 7:
search_up()
search_left()
search_down()
search_left_up_diagonally()
search_left_down_diagonally()
if 0 < current_pos[1] < 7 and 0 < current_pos[0] < 7:
search_right()
search_left()
search_up()
search_down()
search_right_up_diagonally()
search_left_up_diagonally()
search_left_down_diagonally()
search_right_down_diagonally()
return all_possible_moves, all_possible_captures
def pawn_calculate_all_possible_moves(current_pos, chess_board):
#print(current_pos)
player = chess_board.get(current_pos).get_player_side()
all_possible_moves = []
all_possible_captures = []
x = current_pos[0]
y = current_pos[1]
# If this is the pawn's first move, it can advance two squares,
# provided both squares in front of it are empty.
if chess_board[current_pos].get_is_first_move():
    if y == 1 and chess_board.get((x, y + 1)) is None and chess_board.get((x, y + 2)) is None:
        all_possible_moves.append((x, y + 2))
    if y == 6 and chess_board.get((x, y - 1)) is None and chess_board.get((x, y - 2)) is None:
        all_possible_moves.append((x, y - 2))
if chess_board[current_pos].get_name()[0:5] == "Black":
if y < 7:
if chess_board.get((x, y + 1)) is None:
all_possible_moves.append((x, y + 1))
if chess_board.get((x - 1, y + 1)) is not None and chess_board.get((x - 1, y + 1)).get_player_side() != player:
all_possible_captures.append((x - 1, y + 1))
if chess_board.get((x + 1, y + 1)) is not None and chess_board.get((x + 1, y + 1)).get_player_side() != player:
all_possible_captures.append((x + 1, y + 1))
# else pawn is a white piece
else:
if y > 0:
if chess_board.get((x, y - 1)) is None:
all_possible_moves.append((x, y - 1))
if chess_board.get((x - 1, y - 1)) is not None and chess_board.get((x - 1, y - 1)).get_player_side() != player:
all_possible_captures.append((x - 1, y - 1))
if chess_board.get((x + 1, y - 1)) is not None and chess_board.get((x + 1, y - 1)).get_player_side() != player:
all_possible_captures.append((x + 1, y - 1))
return all_possible_moves, all_possible_captures
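A quick smoke test of the pawn calculator with a stub piece, assuming only the get_name / get_player_side / get_is_first_move interface used above:

class _StubPiece:
    def __init__(self, name, side, first_move=True):
        self._name, self._side, self._first = name, side, first_move
    def get_name(self):
        return self._name
    def get_player_side(self):
        return self._side
    def get_is_first_move(self):
        return self._first
    def set_first_move_false(self):
        self._first = False

board = {(4, 6): _StubPiece("White Pawn", "White")}
print(pawn_calculate_all_possible_moves((4, 6), board))
# Expected: the double push (4, 4) and the single push (4, 5); no captures.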
| 24.144676
| 123
| 0.545228
| 2,704
| 20,861
| 3.933802
| 0.038831
| 0.118454
| 0.070885
| 0.063176
| 0.914168
| 0.905424
| 0.882674
| 0.873367
| 0.810473
| 0.768262
| 0
| 0.024513
| 0.35075
| 20,861
| 863
| 124
| 24.172654
| 0.760854
| 0.061694
| 0
| 0.816193
| 0
| 0
| 0.006911
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052516
| false
| 0
| 0
| 0
| 0.107221
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9dbe250bb3ec584374e58cf5b830aaf5accb4f5c
| 183
|
py
|
Python
|
cassandra_s3_incremental_backup_watcher/util.py
|
Cobliteam/cassandra-s3-incremental-backup-watcher
|
65e7f798ebdb3bc3fdb60847799fa736348f602d
|
[
"MIT"
] | 1
|
2020-06-01T09:41:06.000Z
|
2020-06-01T09:41:06.000Z
|
cassandra_s3_incremental_backup_watcher/util.py
|
Cobliteam/cassandra-s3-incremental-backup-watcher
|
65e7f798ebdb3bc3fdb60847799fa736348f602d
|
[
"MIT"
] | null | null | null |
cassandra_s3_incremental_backup_watcher/util.py
|
Cobliteam/cassandra-s3-incremental-backup-watcher
|
65e7f798ebdb3bc3fdb60847799fa736348f602d
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import re
def clean_s3_path(path):
path = re.sub(r'^/+', '', path)
path = re.sub(r'/+$', '', path)
return path
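A couple of illustrative calls (leading and trailing slashes are stripped; interior slashes are kept):

print(clean_s3_path('/backups/cassandra/'))  # -> 'backups/cassandra'
print(clean_s3_path('///a//b///'))           # -> 'a//b'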
| 18.3
| 56
| 0.639344
| 26
| 183
| 4.192308
| 0.538462
| 0.220183
| 0.183486
| 0.238532
| 0.293578
| 0.293578
| 0
| 0
| 0
| 0
| 0
| 0.006803
| 0.196721
| 183
| 9
| 57
| 20.333333
| 0.734694
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d1a3f1be28accc2299cc8b40a4eff31e1a0666df
| 1,103
|
py
|
Python
|
blog/models.py
|
bilal-yousuf/django_local_library
|
18dbf298253a097412a2bf365dc6b3a557a634a2
|
[
"MIT"
] | null | null | null |
blog/models.py
|
bilal-yousuf/django_local_library
|
18dbf298253a097412a2bf365dc6b3a557a634a2
|
[
"MIT"
] | 7
|
2020-02-12T00:31:18.000Z
|
2022-03-12T00:34:07.000Z
|
blog/models.py
|
bilal-yousuf/django_local_library
|
18dbf298253a097412a2bf365dc6b3a557a634a2
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.urls import reverse
from datetime import date
#from ckeditor.fields import RichTextField
# Create your models here.
class Blog(models.Model):
"""Model representing a blog post."""
title = models.CharField(max_length=200)
body = models.TextField(help_text="What is the body of your blog post?")
pub_date = models.DateField(auto_now_add=True)
pub_time = models.TimeField(auto_now_add=True)
def get_absolute_url(self):
return reverse('blog-detail', args=[str(self.id)])
def __str__(self):
return self.title
class Meta:
ordering = ['-pub_date', '-pub_time']
# Create your models here.
class Note(models.Model):
"""Model representing a blog post."""
title = models.CharField(max_length=200)
body = models.TextField(help_text="What is the body of your blog post?")
pub_date = models.DateField(auto_now_add=True)
pub_time = models.TimeField(auto_now_add=True)
def get_absolute_url(self):
return reverse('note-detail', args=[str(self.id)])
def __str__(self):
return self.title
class Meta:
ordering = ['-pub_date', '-pub_time']
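get_absolute_url on both models reverses the named routes 'blog-detail' and 'note-detail'; a minimal urls.py sketch that would satisfy them (the view classes are assumptions, not part of this file):

from django.urls import path
from . import views

urlpatterns = [
    path('blog/<int:pk>/', views.BlogDetailView.as_view(), name='blog-detail'),
    path('note/<int:pk>/', views.NoteDetailView.as_view(), name='note-detail'),
]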
| 24.511111
| 73
| 0.739801
| 167
| 1,103
| 4.694611
| 0.329341
| 0.040816
| 0.05102
| 0.071429
| 0.839286
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0
| 0.006303
| 0.136899
| 1,103
| 45
| 74
| 24.511111
| 0.817227
| 0.140526
| 0
| 0.72
| 0
| 0
| 0.136752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.12
| 0.16
| 0.92
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
d1c4193d3edb2bf7c45e926be8a31f3f9568643e
| 140
|
py
|
Python
|
convoy/utils/__init__.py
|
frain-dev/convoy-python
|
7607a6b65615cc83c38bfb7dba4ad6ed564860bc
|
[
"MIT"
] | null | null | null |
convoy/utils/__init__.py
|
frain-dev/convoy-python
|
7607a6b65615cc83c38bfb7dba4ad6ed564860bc
|
[
"MIT"
] | null | null | null |
convoy/utils/__init__.py
|
frain-dev/convoy-python
|
7607a6b65615cc83c38bfb7dba4ad6ed564860bc
|
[
"MIT"
] | null | null | null |
from convoy.utils.helpers import responseHelper
from convoy.utils.helpers import verifySignature
from convoy.utils.helpers import hashString
| 46.666667
| 48
| 0.878571
| 18
| 140
| 6.833333
| 0.444444
| 0.243902
| 0.365854
| 0.536585
| 0.682927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078571
| 140
| 3
| 49
| 46.666667
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
d1ef2734f13f6cdbaf17c5709cb943fed9d03cb8
| 767
|
py
|
Python
|
src/tf_transformers/utils/__init__.py
|
s4sarath/tf-transformers
|
361f7b01c7816034ddfc8661f8b6a967835bc1de
|
[
"Apache-2.0"
] | 1
|
2021-09-13T07:21:15.000Z
|
2021-09-13T07:21:15.000Z
|
src/tf_transformers/utils/__init__.py
|
Vibha111094/tf-transformers
|
f26d440a4de0557e0e481279bfd70a732aaa8825
|
[
"Apache-2.0"
] | null | null | null |
src/tf_transformers/utils/__init__.py
|
Vibha111094/tf-transformers
|
f26d440a4de0557e0e481279bfd70a732aaa8825
|
[
"Apache-2.0"
] | null | null | null |
from tf_transformers.utils.convert.convert_albert import convert_albert_hf_to_tf_transformers
from tf_transformers.utils.convert.convert_bert import convert_bert_hf_to_tf_transformers
from tf_transformers.utils.convert.convert_gpt2 import convert_gpt2_hf_to_tf_transformers
from tf_transformers.utils.convert.convert_roberta import convert_roberta_hf_to_tf_transformers
from tf_transformers.utils.convert.convert_t5 import convert_t5_hf_to_tf_transformers
from tf_transformers.utils.convert.convert_mt5 import convert_mt5_hf_to_tf_transformers
from tf_transformers.utils.fast_sp_alignment import fast_sp_alignment
from tf_transformers.utils.tokenization import BasicTokenizer
from tf_transformers.utils.utils import get_config, get_model_wrapper, validate_model_name
| 76.7
| 95
| 0.916558
| 116
| 767
| 5.594828
| 0.215517
| 0.323575
| 0.249615
| 0.318952
| 0.543914
| 0.543914
| 0.486903
| 0.486903
| 0.423729
| 0.423729
| 0
| 0.00823
| 0.049544
| 767
| 9
| 96
| 85.222222
| 0.88203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ae3c160751268fe8a0b62fa16ae7bea32cddb36e
| 114
|
py
|
Python
|
django_changeset/models/__init__.py
|
beachmachine/django-changeset
|
9ce8edbcbdf9d721c683f6890a8ee76486380ded
|
[
"BSD-3-Clause"
] | 7
|
2016-04-29T19:46:43.000Z
|
2020-03-30T16:19:14.000Z
|
django_changeset/models/__init__.py
|
beachmachine/django-changeset
|
9ce8edbcbdf9d721c683f6890a8ee76486380ded
|
[
"BSD-3-Clause"
] | 3
|
2019-06-03T12:35:16.000Z
|
2021-10-15T07:31:56.000Z
|
django_changeset/models/__init__.py
|
beachmachine/django-changeset
|
9ce8edbcbdf9d721c683f6890a8ee76486380ded
|
[
"BSD-3-Clause"
] | 7
|
2020-01-08T09:13:33.000Z
|
2020-10-09T12:05:31.000Z
|
# -*- coding: utf-8 -*-
from django_changeset.models.models import *
from django_changeset.models.mixins import *
| 28.5
| 44
| 0.754386
| 15
| 114
| 5.6
| 0.6
| 0.238095
| 0.452381
| 0.595238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.114035
| 114
| 3
| 45
| 38
| 0.821782
| 0.184211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ae623763da0ebbd1a9caf0577b2912b6db545c78
| 11,025
|
py
|
Python
|
lang/python/github/com/metaprov/modelaapi/services/lab/v1/lab_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 5
|
2022-02-18T03:40:10.000Z
|
2022-03-01T16:11:24.000Z
|
lang/python/github/com/metaprov/modelaapi/services/lab/v1/lab_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 1
|
2022-01-07T19:59:25.000Z
|
2022-02-04T01:21:14.000Z
|
lang/python/github/com/metaprov/modelaapi/services/lab/v1/lab_pb2_grpc.py
|
metaprov/modeldapi
|
ee05693832051dcd990ee4f061715d7ae0787340
|
[
"Apache-2.0"
] | 1
|
2022-03-25T10:21:43.000Z
|
2022-03-25T10:21:43.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from github.com.metaprov.modelaapi.services.lab.v1 import lab_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2
class LabServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListLabs = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.lab.v1.LabService/ListLabs',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.ListLabsRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.ListLabsResponse.FromString,
)
self.CreateLab = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.lab.v1.LabService/CreateLab',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.CreateLabRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.CreateLabResponse.FromString,
)
self.GetLab = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.lab.v1.LabService/GetLab',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.GetLabRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.GetLabResponse.FromString,
)
self.UpdateLab = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.lab.v1.LabService/UpdateLab',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.UpdateLabRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.UpdateLabResponse.FromString,
)
self.DeleteLab = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.lab.v1.LabService/DeleteLab',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.DeleteLabRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.DeleteLabResponse.FromString,
)
class LabServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def ListLabs(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateLab(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetLab(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateLab(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteLab(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LabServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListLabs': grpc.unary_unary_rpc_method_handler(
servicer.ListLabs,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.ListLabsRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.ListLabsResponse.SerializeToString,
),
'CreateLab': grpc.unary_unary_rpc_method_handler(
servicer.CreateLab,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.CreateLabRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.CreateLabResponse.SerializeToString,
),
'GetLab': grpc.unary_unary_rpc_method_handler(
servicer.GetLab,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.GetLabRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.GetLabResponse.SerializeToString,
),
'UpdateLab': grpc.unary_unary_rpc_method_handler(
servicer.UpdateLab,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.UpdateLabRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.UpdateLabResponse.SerializeToString,
),
'DeleteLab': grpc.unary_unary_rpc_method_handler(
servicer.DeleteLab,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.DeleteLabRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.DeleteLabResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'github.com.metaprov.modelaapi.services.lab.v1.LabService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class LabService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def ListLabs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.lab.v1.LabService/ListLabs',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.ListLabsRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.ListLabsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateLab(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.lab.v1.LabService/CreateLab',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.CreateLabRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.CreateLabResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetLab(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.lab.v1.LabService/GetLab',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.GetLabRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.GetLabResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateLab(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.lab.v1.LabService/UpdateLab',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.UpdateLabRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.UpdateLabResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteLab(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.lab.v1.LabService/DeleteLab',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.DeleteLabRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_lab_dot_v1_dot_lab__pb2.DeleteLabResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
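Typical client-side use of the generated stub; a minimal sketch in which the server address is an assumption:

import grpc
from github.com.metaprov.modelaapi.services.lab.v1 import lab_pb2, lab_pb2_grpc

channel = grpc.insecure_channel('localhost:3000')  # hypothetical endpoint
stub = lab_pb2_grpc.LabServiceStub(channel)
response = stub.ListLabs(lab_pb2.ListLabsRequest())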
| 55.40201
| 159
| 0.726349
| 1,233
| 11,025
| 5.995134
| 0.089213
| 0.050325
| 0.050325
| 0.062906
| 0.895563
| 0.895563
| 0.895563
| 0.864583
| 0.850649
| 0.824134
| 0
| 0.008594
| 0.208435
| 11,025
| 198
| 160
| 55.681818
| 0.838432
| 0.062222
| 0
| 0.493827
| 1
| 0
| 0.096375
| 0.069967
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.012346
| 0.030864
| 0.135802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ae89e6a6b575079c9003b3e252e49ef8aae70b5a
| 103,198
|
py
|
Python
|
Architectures/x86/x86disassembly.py
|
Ernest1338/Threebe
|
8b2ffc8e7dd3a1bfb70e3194abef4b5a61704dcb
|
[
"MIT"
] | 2
|
2020-08-18T18:41:37.000Z
|
2021-03-22T04:10:47.000Z
|
Architectures/x86/x86disassembly.py
|
Ernest1338/Threebe
|
8b2ffc8e7dd3a1bfb70e3194abef4b5a61704dcb
|
[
"MIT"
] | 2
|
2021-08-11T09:38:45.000Z
|
2021-08-12T09:17:13.000Z
|
Architectures/x86/x86disassembly.py
|
Ernest1338/Threebe
|
8b2ffc8e7dd3a1bfb70e3194abef4b5a61704dcb
|
[
"MIT"
] | null | null | null |
# This file contains functions that translate raw bytes into assembly instructions using x86opcodesTable and some python logic.
import Architectures.x86.x86opcodesTable as x86opT
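x86opT.x86opcodes, consumed throughout the loop below, maps two-character hex byte strings to mnemonic strings; a hypothetical excerpt of the assumed shape (the real table lives in Architectures/x86/x86opcodesTable.py):

x86opcodes = {
    "90": "nop",
    "C3": "ret",
    "01": "add",
}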
times = 0
def cancle_function_iteration(howmany):
    # Skip the next <howmany> iterations of the disassembly loop; those bytes
    # were already consumed as operands of the current instruction.
    global times
    times += int(howmany)
def disassemble_x86(bytes, ascii_dict, colors):
global times
bcolors = colors
if bcolors.HEADER == '':
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[01m'
UNDERLINE = '\033[04m'
RESET = '\033[00m'
isClean = True
else:
isClean = False
offset1 = 134512640
counter1 = 0
for i in bytes:
if isClean:
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[01m'
UNDERLINE = '\033[04m'
RESET = '\033[00m'
if times == 0:
to_display = i
after_instruction = ""
after_byte = ""
if i in x86opT.x86opcodes:
instruction = x86opT.x86opcodes[i]
intruction_len_for_check = 51+len(instruction)  # base width of the listing line; add len(after_instruction) whenever that variable is used inside a branch
should_print = True
# 1 byte instructions
if (i == "06" or i == "07" or i == "0E" or i == "16" or i == "17" or i == "1E" or i == "1F" or i == "27" or i == "2F" or i == "40" or i == "41" or i == "42" or i == "43"
or i == "44" or i == "45" or i == "46" or i == "47" or i == "48" or i == "49" or i == "4A" or i == "4B" or i == "4C" or i == "4D" or i == "4E" or i == "4F" or i == "50" or i == "51"
or i == "52" or i == "53" or i == "54" or i == "55" or i == "56" or i == "57" or i == "58" or i == "59" or i == "5A" or i == "5B" or i == "5C" or i == "5D" or i == "5E" or i == "5F"
or i == "61" or i == "90" or i == "98" or i == "99" or i == "C3" or i == "C9" or i == "CF" or i == "EC" or i == "ED" or i == "EE" or i == "EF" or i == "F8"):
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+" "+instruction.split(" ")[2]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
elif i == "01": # ADD
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="CA":
after_instruction = " edx, ecx"
elif bytes[counter1+1]=="D0":
after_instruction = " eax, edx"
elif bytes[counter1+1]=="01":
after_instruction = " dword [ecx], eax"
elif bytes[counter1+1]=="00":
after_instruction = " dword [eax], eax"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "04": # ADD al, <value>
after_byte = " "+bytes[counter1+1]
TEMPvar = bytes[counter1+1]
if bytes[counter1+1][0]=="0":
TEMPvar = bytes[counter1+1][1]
after_instruction = " al, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(TEMPvar,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(TEMPvar,16))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif i == "08": # OR
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="00":
after_instruction = " byte [eax], al"
elif bytes[counter1+1]=="01":
after_instruction = " byte [ecx], al"
elif bytes[counter1+1]=="02":
after_instruction = " byte [edx], al"
elif bytes[counter1+1]=="03":
after_instruction = " byte [ebx], al"
elif bytes[counter1+1]=="06":
after_instruction = " byte [esi], al"
elif bytes[counter1+1]=="07":
after_instruction = " byte [edi], al"
elif bytes[counter1+1]=="08":
after_instruction = " byte [eax], cl"
elif bytes[counter1+1]=="09":
after_instruction = " byte [ecx], cl"
elif bytes[counter1+1]=="0A":
after_instruction = " byte [edx], cl"
elif bytes[counter1+1]=="0B":
after_instruction = " byte [ebx], cl"
elif bytes[counter1+1]=="0E":
after_instruction = " byte [esx], cl"
elif bytes[counter1+1]=="0F":
after_instruction = " byte [edi], cl"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "0A": # OR
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="36":
after_instruction = " dh, byte [esi]"
elif bytes[counter1+1]=="00":
after_instruction = " al, byte [eax]"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "0B": # OR
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="00":
after_instruction = " eax, dword [eax]"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "0C": # OR al, <value>
after_byte = " "+bytes[counter1+1]
TEMPvar = bytes[counter1+1]
if bytes[counter1+1][0]=="0":
TEMPvar = bytes[counter1+1][1]
after_instruction = " al, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(TEMPvar,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(TEMPvar,16))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif i == "20": # AND
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="00":
after_instruction = " byte [eax], al"
elif bytes[counter1+1]=="01":
after_instruction = " byte [ecx], al"
elif bytes[counter1+1]=="02":
after_instruction = " byte [edx], al"
elif bytes[counter1+1]=="03":
after_instruction = " byte [ebx], al"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "24": # AND al, <value>
after_byte = " "+bytes[counter1+1]
TEMPvar = bytes[counter1+1]
if bytes[counter1+1][0]=="0":
TEMPvar = bytes[counter1+1][1]
after_instruction = " al, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(TEMPvar,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(TEMPvar,16))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif i == "29": # SUB
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="C6":
after_instruction = " esi, eax"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "2C": # SUB al, <value>
after_byte = " "+bytes[counter1+1]
TEMPvar = bytes[counter1+1]
if bytes[counter1+1][0]=="0":
TEMPvar = bytes[counter1+1][1]
after_instruction = " al, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(TEMPvar,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(TEMPvar,16))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif i == "30": # XOR
after_byte = " "+bytes[counter1+1]
XOR30var = 1
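# XOR30var counts the bytes consumed after the opcode: 1 for a bare ModRM byte,
# 2 when a disp8 byte follows (the "XOR byte [ebp + disp8], cl" form below).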
if bytes[counter1+1]=="00":
after_instruction = " byte [eax], al"
elif bytes[counter1+1]=="4D":
XOR30var2 = str(bytes[counter1+2])
if str(bytes[counter1+2][0])=="0":
XOR30var2 = str(bytes[counter1+2][1])
after_instruction = " byte [ebp + 0x"+XOR30var2+"], cl"
XOR30var = 2
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(XOR30var)
elif i == "31": # XOR
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="ED":
after_instruction = " ebp, ebp"
elif bytes[counter1+1]=="FF":
after_instruction = " edi, edi"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "32": # XOR
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="00":
after_instruction = " al, byte [eax]"
elif bytes[counter1+1]=="2E":
after_instruction = " ch, byte [esi]"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "34": # XOR al, <value>
after_byte = " "+bytes[counter1+1]
TEMPvar = bytes[counter1+1]
if bytes[counter1+1][0]=="0":
TEMPvar = bytes[counter1+1][1]
after_instruction = " al, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(TEMPvar,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(TEMPvar,16))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif (i == "64" or i == "65" or i == "66" or i == "67"): # NOP
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="90":
after_instruction = ""
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "68": # PUSH string
after_byte = " "+bytes[counter1+1]+" "+bytes[counter1+2]+" "+bytes[counter1+3]+" "+bytes[counter1+4]
TEMPoffset = " 0x"+bytes[counter1+4]+bytes[counter1+3]+bytes[counter1+2]+bytes[counter1+1]
TEMPoffset_to_dict2 = hex(int(TEMPoffset,16)+1)
TEMPoffset_to_dict = hex(int(TEMPoffset,16))
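# The pushed imm32 is stored little-endian, so the four bytes are reassembled
# high byte first; ascii_dict maps known string offsets to their text, and the
# +1 variant catches strings whose recorded offset is off by one.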
if TEMPoffset_to_dict in ascii_dict:
after_instruction = TEMPoffset
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; str: "+str(ascii_dict[TEMPoffset_to_dict])
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; str: {bcolors.WARNING}"+str(ascii_dict[TEMPoffset_to_dict])
elif TEMPoffset_to_dict2 in ascii_dict:
after_instruction = TEMPoffset
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; str: "+str(ascii_dict[TEMPoffset_to_dict2])
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; str: {bcolors.WARNING}"+str(ascii_dict[TEMPoffset_to_dict2])
else:
after_instruction = TEMPoffset
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(4)
elif i == "70" or i == "71" or i == "72" or i == "73" or i == "74" or i == "75" or i == "7E" or i == "7F": # JE, JNE, JLE, JG, JO, JNO, JB, JAE
after_byte = " "+bytes[counter1+1]
if int(bytes[counter1+1],16)>=128:
if int(bytes[counter1+1],16)==255:
after_instruction = " "+hex(offset1+1)
elif int(bytes[counter1+1],16)==254:
after_instruction = " "+hex(offset1)
else:
after_instruction = " "+hex(offset1-(256-(int(bytes[counter1+1],16)+2)))
else:
after_instruction = " "+hex(offset1+(int(bytes[counter1+1],16)+2))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif i == "81": # ADD, ...
after_byte = " "+bytes[counter1+1]+" "+bytes[counter1+2]+" "+bytes[counter1+3]+" "+bytes[counter1+4]+" "+bytes[counter1+5]
_81var = ""
if bytes[counter1+1]=="C3":
if bytes[counter1+5]!="00":
_81var += str(bytes[counter1+5])
if bytes[counter1+4]!="00":
_81var += str(bytes[counter1+4])
if bytes[counter1+3]!="00":
_81var += str(bytes[counter1+3])
if bytes[counter1+2]!="00":
_81var += str(bytes[counter1+2])
after_instruction = " ebx, 0x"+_81var
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(_81var,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(_81var,16))
elif bytes[counter1+1]=="EC":
instruction = "SUB"
if bytes[counter1+5]!="00":
_81var += str(bytes[counter1+5])
if bytes[counter1+4]!="00":
_81var += str(bytes[counter1+4])
if bytes[counter1+3]!="00":
_81var += str(bytes[counter1+3])
if bytes[counter1+2]!="00":
_81var += str(bytes[counter1+2])
after_instruction = " esp, 0x"+_81var
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(_81var,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(_81var,16))
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(5)
elif i == "83": # ADD, CMP, SUB, OR
after_byte = " "+bytes[counter1+1]+" "+bytes[counter1+2]
TEMPvar = bytes[counter1+2]
TEMPvar2 = 2
TEMPvar3 = bytes[counter1+3]
TEMPvar4 = str(int(TEMPvar,16))
if bytes[counter1+2][0]=="0":
TEMPvar = bytes[counter1+2][1]
if bytes[counter1+3][0]=="0":
TEMPvar3 = bytes[counter1+3][1]
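# Opcode 0x83 is the sign-extended imm8 group: the reg field of the ModRM byte
# selects the operation (000 ADD, 001 OR, 100 AND, 101 SUB, 111 CMP), so the
# branches below rewrite `instruction` when the default mnemonic does not apply.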
if bytes[counter1+1]=="C2":
after_instruction = " edx, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="C4":
after_instruction = " esp, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="C7":
after_instruction = " edi, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="C0":
after_instruction = " eax, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="F8":
instruction = "CMP"
after_instruction = " eax, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="3B":
instruction = "CMP"
after_instruction = " dword [ebx], "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="EC":
instruction = "SUB"
after_instruction = " esp, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="EA":
instruction = "SUB"
after_instruction = " edx, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="08":
instruction = "OR"
if TEMPvar=="FF":
TEMPvar4 = "-1"
after_instruction = " dword [eax], "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
elif bytes[counter1+1]=="E4":
instruction = "AND"
if TEMPvar=="FF":
TEMPvar4 = "-1"
elif TEMPvar[0]=="F":
TEMPvar = "FFFFFF"+TEMPvar
after_instruction = " esp, "+"0x"+str(TEMPvar.lower())
elif bytes[counter1+1]=="45" and bytes[counter1+2]=="FC":
TEMPvar4 = str(int(bytes[counter1+3],16))
after_instruction = " dword [var_4h], "+"0x"+str(TEMPvar3.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
after_byte += " "+bytes[counter1+3]
TEMPvar2 = 4
elif bytes[counter1+1]=="7D" and bytes[counter1+2]=="08":
TEMPvar4 = str(int(bytes[counter1+3],16))
instruction = "CMP"
after_instruction = " dword [arg_8h], "+"0x"+str(TEMPvar3.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+TEMPvar4
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+TEMPvar4
after_byte += " "+bytes[counter1+3]
TEMPvar2 = 4
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(TEMPvar2)
elif i == "84": # TEST
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="C0":
after_instruction = " al, al"
elif bytes[counter1+1]=="C1":
after_instruction = " cl, al"
elif bytes[counter1+1]=="C2":
after_instruction = " dl, al"
elif bytes[counter1+1]=="C3":
after_instruction = " bl, al"
elif bytes[counter1+1]=="C4":
after_instruction = " ah, al"
elif bytes[counter1+1]=="C5":
after_instruction = " ch, al"
elif bytes[counter1+1]=="C6":
after_instruction = " dh, al"
elif bytes[counter1+1]=="C7":
after_instruction = " bh, al"
elif bytes[counter1+1]=="C8":
after_instruction = " al, cl"
elif bytes[counter1+1]=="C9":
after_instruction = " cl, cl"
elif bytes[counter1+1]=="CA":
after_instruction = " dl, cl"
elif bytes[counter1+1]=="CB":
after_instruction = " bl, cl"
elif bytes[counter1+1]=="CC":
after_instruction = " ah, cl"
elif bytes[counter1+1]=="CD":
after_instruction = " ch, cl"
elif bytes[counter1+1]=="CE":
after_instruction = " dh, cl"
elif bytes[counter1+1]=="CF":
after_instruction = " bh, cl"
elif bytes[counter1+1]=="D0":
after_instruction = " al, dl"
elif bytes[counter1+1]=="D1":
after_instruction = " cl, dl"
elif bytes[counter1+1]=="D2":
after_instruction = " dl, dl"
elif bytes[counter1+1]=="D3":
after_instruction = " bl, dl"
elif bytes[counter1+1]=="D4":
after_instruction = " ah, dl"
elif bytes[counter1+1]=="D5":
after_instruction = " ch, dl"
elif bytes[counter1+1]=="D6":
after_instruction = " dh, dl"
elif bytes[counter1+1]=="D7":
after_instruction = " bh, dl"
elif bytes[counter1+1]=="D8":
after_instruction = " al, bl"
elif bytes[counter1+1]=="D9":
after_instruction = " cl, bl"
elif bytes[counter1+1]=="DA":
after_instruction = " dl, bl"
elif bytes[counter1+1]=="DB":
after_instruction = " bl, bl"
elif bytes[counter1+1]=="DC":
after_instruction = " ah, bl"
elif bytes[counter1+1]=="DD":
after_instruction = " ch, bl"
elif bytes[counter1+1]=="DE":
after_instruction = " dh, bl"
elif bytes[counter1+1]=="DF":
after_instruction = " bh, bl"
elif bytes[counter1+1]=="E0":
after_instruction = " al, ah"
elif bytes[counter1+1]=="E1":
after_instruction = " cl, ah"
elif bytes[counter1+1]=="E2":
after_instruction = " dl, ah"
elif bytes[counter1+1]=="E3":
after_instruction = " bl, ah"
elif bytes[counter1+1]=="E4":
after_instruction = " ah, ah"
elif bytes[counter1+1]=="E5":
after_instruction = " ch, ah"
elif bytes[counter1+1]=="E6":
after_instruction = " dh, ah"
elif bytes[counter1+1]=="E7":
after_instruction = " bh, ah"
elif bytes[counter1+1]=="E8":
after_instruction = " al, ch"
elif bytes[counter1+1]=="E9":
after_instruction = " cl, ch"
elif bytes[counter1+1]=="EA":
after_instruction = " dl, ch"
elif bytes[counter1+1]=="EB":
after_instruction = " bl, ch"
elif bytes[counter1+1]=="EC":
after_instruction = " ah, ch"
elif bytes[counter1+1]=="ED":
after_instruction = " ch, ch"
elif bytes[counter1+1]=="EE":
after_instruction = " dh, ch"
elif bytes[counter1+1]=="EF":
after_instruction = " bh, ch"
elif bytes[counter1+1]=="F0":
after_instruction = " al, dh"
elif bytes[counter1+1]=="F1":
after_instruction = " cl, dh"
elif bytes[counter1+1]=="F2":
after_instruction = " dl, dh"
elif bytes[counter1+1]=="F3":
after_instruction = " bl, dh"
elif bytes[counter1+1]=="F4":
after_instruction = " ah, dh"
elif bytes[counter1+1]=="F5":
after_instruction = " ch, dh"
elif bytes[counter1+1]=="F6":
after_instruction = " dh, dh"
elif bytes[counter1+1]=="F7":
after_instruction = " bh, dh"
elif bytes[counter1+1]=="F8":
after_instruction = " al, bh"
elif bytes[counter1+1]=="F9":
after_instruction = " cl, bh"
elif bytes[counter1+1]=="FA":
after_instruction = " dl, bh"
elif bytes[counter1+1]=="FB":
after_instruction = " bl, bh"
elif bytes[counter1+1]=="FC":
after_instruction = " ah, bh"
elif bytes[counter1+1]=="FD":
after_instruction = " ch, bh"
elif bytes[counter1+1]=="FE":
after_instruction = " dh, bh"
elif bytes[counter1+1]=="FF":
after_instruction = " bh, bh"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "85": # TEST
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="C0":
after_instruction = " eax, eax"
elif bytes[counter1+1]=="C1":
after_instruction = " ecx, eax"
elif bytes[counter1+1]=="C2":
after_instruction = " edx, eax"
elif bytes[counter1+1]=="C3":
after_instruction = " ebx, eax"
elif bytes[counter1+1]=="C4":
after_instruction = " esp, eax"
elif bytes[counter1+1]=="C5":
after_instruction = " ebp, eax"
elif bytes[counter1+1]=="C6":
after_instruction = " esi, eax"
elif bytes[counter1+1]=="C7":
after_instruction = " edi, eax"
elif bytes[counter1+1]=="C8":
after_instruction = " eax, ecx"
elif bytes[counter1+1]=="C9":
after_instruction = " ecx, ecx"
elif bytes[counter1+1]=="CA":
after_instruction = " edx, ecx"
elif bytes[counter1+1]=="CB":
after_instruction = " ebx, ecx"
elif bytes[counter1+1]=="CC":
after_instruction = " esp, ecx"
elif bytes[counter1+1]=="CD":
after_instruction = " ebp, ecx"
elif bytes[counter1+1]=="CE":
after_instruction = " esi, ecx"
elif bytes[counter1+1]=="CF":
after_instruction = " edi, ecx"
elif bytes[counter1+1]=="D0":
after_instruction = " eax, edx"
elif bytes[counter1+1]=="D1":
after_instruction = " ecx, edx"
elif bytes[counter1+1]=="D2":
after_instruction = " edx, edx"
elif bytes[counter1+1]=="D3":
after_instruction = " ebx, edx"
elif bytes[counter1+1]=="D4":
after_instruction = " esp, edx"
elif bytes[counter1+1]=="D5":
after_instruction = " ebp, edx"
elif bytes[counter1+1]=="D6":
after_instruction = " esi, edx"
elif bytes[counter1+1]=="D7":
after_instruction = " edi, edx"
elif bytes[counter1+1]=="D8":
after_instruction = " eax, ebx"
elif bytes[counter1+1]=="D9":
after_instruction = " ecx, ebx"
elif bytes[counter1+1]=="DA":
after_instruction = " edx, ebx"
elif bytes[counter1+1]=="DB":
after_instruction = " ebx, ebx"
elif bytes[counter1+1]=="DC":
after_instruction = " esp, ebx"
elif bytes[counter1+1]=="DD":
after_instruction = " ebp, ebx"
elif bytes[counter1+1]=="DE":
after_instruction = " esi, ebx"
elif bytes[counter1+1]=="DF":
after_instruction = " edi, ebx"
elif bytes[counter1+1]=="E0":
after_instruction = " eax, esp"
elif bytes[counter1+1]=="E1":
after_instruction = " ecx, esp"
elif bytes[counter1+1]=="E2":
after_instruction = " edx, esp"
elif bytes[counter1+1]=="E3":
after_instruction = " ebx, esp"
elif bytes[counter1+1]=="E4":
after_instruction = " esp, esp"
elif bytes[counter1+1]=="E5":
after_instruction = " ebp, esp"
elif bytes[counter1+1]=="E6":
after_instruction = " esi, esp"
elif bytes[counter1+1]=="E7":
after_instruction = " edi, esp"
elif bytes[counter1+1]=="E8":
after_instruction = " eax, ebp"
elif bytes[counter1+1]=="E9":
after_instruction = " ecx, ebp"
elif bytes[counter1+1]=="EA":
after_instruction = " edx, ebp"
elif bytes[counter1+1]=="EB":
after_instruction = " ebx, ebp"
elif bytes[counter1+1]=="EC":
after_instruction = " esp, ebp"
elif bytes[counter1+1]=="ED":
after_instruction = " ebp, ebp"
elif bytes[counter1+1]=="EE":
after_instruction = " esi, ebp"
elif bytes[counter1+1]=="EF":
after_instruction = " edi, ebp"
elif bytes[counter1+1]=="F0":
after_instruction = " eax, esi"
elif bytes[counter1+1]=="F1":
after_instruction = " ecx, esi"
elif bytes[counter1+1]=="F2":
after_instruction = " edx, esi"
elif bytes[counter1+1]=="F3":
after_instruction = " ebx, esi"
elif bytes[counter1+1]=="F4":
after_instruction = " esp, esi"
elif bytes[counter1+1]=="F5":
after_instruction = " ebp, esi"
elif bytes[counter1+1]=="F6":
after_instruction = " esi, esi"
elif bytes[counter1+1]=="F7":
after_instruction = " edi, esi"
elif bytes[counter1+1]=="F8":
after_instruction = " eax, edi"
elif bytes[counter1+1]=="F9":
after_instruction = " ecx, edi"
elif bytes[counter1+1]=="FA":
after_instruction = " edx, edi"
elif bytes[counter1+1]=="FB":
after_instruction = " ebx, edi"
elif bytes[counter1+1]=="FC":
after_instruction = " esp, edi"
elif bytes[counter1+1]=="FD":
after_instruction = " ebp, edi"
elif bytes[counter1+1]=="FE":
after_instruction = " esi, edi"
elif bytes[counter1+1]=="FF":
after_instruction = " edi, edi"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "89": # MOV
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="C0":
after_instruction = " eax, eax"
elif bytes[counter1+1]=="C1":
after_instruction = " ecx, eax"
elif bytes[counter1+1]=="C2":
after_instruction = " edx, eax"
elif bytes[counter1+1]=="C3":
after_instruction = " ebx, eax"
elif bytes[counter1+1]=="C4":
after_instruction = " esp, eax"
elif bytes[counter1+1]=="C5":
after_instruction = " ebp, eax"
elif bytes[counter1+1]=="C6":
after_instruction = " esi, eax"
elif bytes[counter1+1]=="C7":
after_instruction = " edi, eax"
elif bytes[counter1+1]=="C8":
after_instruction = " eax, ecx"
elif bytes[counter1+1]=="C9":
after_instruction = " ecx, ecx"
elif bytes[counter1+1]=="CA":
after_instruction = " edx, ecx"
elif bytes[counter1+1]=="CB":
after_instruction = " ebx, ecx"
elif bytes[counter1+1]=="CC":
after_instruction = " esp, ecx"
elif bytes[counter1+1]=="CD":
after_instruction = " ebp, ecx"
elif bytes[counter1+1]=="CE":
after_instruction = " esi, ecx"
elif bytes[counter1+1]=="CF":
after_instruction = " edi, ecx"
elif bytes[counter1+1]=="D0":
after_instruction = " eax, edx"
elif bytes[counter1+1]=="D1":
after_instruction = " ecx, edx"
elif bytes[counter1+1]=="D2":
after_instruction = " edx, edx"
elif bytes[counter1+1]=="D3":
after_instruction = " ebx, edx"
elif bytes[counter1+1]=="D4":
after_instruction = " esp, edx"
elif bytes[counter1+1]=="D5":
after_instruction = " ebp, edx"
elif bytes[counter1+1]=="D6":
after_instruction = " esi, edx"
elif bytes[counter1+1]=="D7":
after_instruction = " edi, edx"
elif bytes[counter1+1]=="D8":
after_instruction = " eax, ebx"
elif bytes[counter1+1]=="D9":
after_instruction = " ecx, ebx"
elif bytes[counter1+1]=="DA":
after_instruction = " edx, ebx"
elif bytes[counter1+1]=="DB":
after_instruction = " ebx, ebx"
elif bytes[counter1+1]=="DC":
after_instruction = " esp, ebx"
elif bytes[counter1+1]=="DD":
after_instruction = " ebp, ebx"
elif bytes[counter1+1]=="DE":
after_instruction = " esi, ebx"
elif bytes[counter1+1]=="DF":
after_instruction = " edi, ebx"
elif bytes[counter1+1]=="E0":
after_instruction = " eax, esp"
elif bytes[counter1+1]=="E1":
after_instruction = " ecx, esp"
elif bytes[counter1+1]=="E2":
after_instruction = " edx, esp"
elif bytes[counter1+1]=="E3":
after_instruction = " ebx, esp"
elif bytes[counter1+1]=="E4":
after_instruction = " esp, esp"
elif bytes[counter1+1]=="E5":
after_instruction = " ebp, esp"
elif bytes[counter1+1]=="E6":
after_instruction = " esi, esp"
elif bytes[counter1+1]=="E7":
after_instruction = " edi, esp"
elif bytes[counter1+1]=="E8":
after_instruction = " eax, ebp"
elif bytes[counter1+1]=="E9":
after_instruction = " ecx, ebp"
elif bytes[counter1+1]=="EA":
after_instruction = " edx, ebp"
elif bytes[counter1+1]=="EB":
after_instruction = " ebx, ebp"
elif bytes[counter1+1]=="EC":
after_instruction = " esp, ebp"
elif bytes[counter1+1]=="ED":
after_instruction = " ebp, ebp"
elif bytes[counter1+1]=="EE":
after_instruction = " esi, ebp"
elif bytes[counter1+1]=="EF":
after_instruction = " edi, ebp"
elif bytes[counter1+1]=="F0":
after_instruction = " eax, esi"
elif bytes[counter1+1]=="F1":
after_instruction = " ecx, esi"
elif bytes[counter1+1]=="F2":
after_instruction = " edx, esi"
elif bytes[counter1+1]=="F3":
after_instruction = " ebx, esi"
elif bytes[counter1+1]=="F4":
after_instruction = " esp, esi"
elif bytes[counter1+1]=="F5":
after_instruction = " ebp, esi"
elif bytes[counter1+1]=="F6":
after_instruction = " esi, esi"
elif bytes[counter1+1]=="F7":
after_instruction = " edi, esi"
elif bytes[counter1+1]=="F8":
after_instruction = " eax, edi"
elif bytes[counter1+1]=="F9":
after_instruction = " ecx, edi"
elif bytes[counter1+1]=="FA":
after_instruction = " edx, edi"
elif bytes[counter1+1]=="FB":
after_instruction = " ebx, edi"
elif bytes[counter1+1]=="FC":
after_instruction = " esp, edi"
elif bytes[counter1+1]=="FD":
after_instruction = " ebp, edi"
elif bytes[counter1+1]=="FE":
after_instruction = " esi, edi"
elif bytes[counter1+1]=="FF":
after_instruction = " edi, edi"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "8B": # MOV
after_byte = " "+bytes[counter1+1]
TEMPvar = 1
if bytes[counter1+1]=="1C":
if bytes[counter1+2]=="24":
after_instruction = " ebx, dword [esp]"
after_byte += " "+bytes[counter1+2]
TEMPvar += 1
elif bytes[counter1+1]=="10":
after_instruction = " edx, dword [eax]"
elif bytes[counter1+1]=="55":
after_instruction = " edx, dword [var_4h]"
elif bytes[counter1+1]=="45":
after_instruction = " eax, dword [arg_8h]"
elif bytes[counter1+1]=="4D":
after_instruction = " ecx, dword [var_4h]"
elif bytes[counter1+1]=="00":
after_instruction = " eax, dword [eax]"
elif bytes[counter1+1]=="6C":
after_instruction = " ebp, dword [arg_4h]"
elif bytes[counter1+1]=="43":
TEMPvar2 = str(bytes[counter1+2])
if str(bytes[counter1+2][0])=="0":
TEMPvar2 = str(bytes[counter1+2][1])
after_instruction = " eax, dword [ebx + "+str(TEMPvar2)+"]"
TEMPvar = 2
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(TEMPvar)
elif i == "BA": # MOV edx, <value>
after_byte = " "+bytes[counter1+1]
TEMPvar = bytes[counter1+1]
if bytes[counter1+1][0]=="0":
TEMPvar = bytes[counter1+1][1]
after_instruction = " edx, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(TEMPvar,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(TEMPvar,16))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif i == "E4": # IN al, <value>
after_byte = " "+bytes[counter1+1]
TEMPvar = bytes[counter1+1]
if bytes[counter1+1][0]=="0":
TEMPvar = bytes[counter1+1][1]
after_instruction = " al, "+"0x"+str(TEMPvar.lower())
lenWholeOpcode = len(instruction)+len(after_instruction)
if isClean:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+"; "+str(int(TEMPvar,16))
else:
after_instruction = after_instruction+" "*(32-lenWholeOpcode)+f"{bcolors.OKGREEN}; "+str(int(TEMPvar,16))
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
print(check1)
after_byte = ""
after_instruction = ""
cancle_function_iteration(1)
elif i == "F3": # RET
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="C3":
pass
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "F6": # NOT, NEG, IDIV, DIV, IMUL, MUL
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="FF":
instruction = "IDIV"
after_instruction = " bh"
elif bytes[counter1+1]=="FE":
instruction = "IDIV"
after_instruction = " dh"
elif bytes[counter1+1]=="FD":
instruction = "IDIV"
after_instruction = " ch"
elif bytes[counter1+1]=="FC":
instruction = "IDIV"
after_instruction = " ah"
elif bytes[counter1+1]=="FB":
instruction = "IDIV"
after_instruction = " bl"
elif bytes[counter1+1]=="FA":
instruction = "IDIV"
after_instruction = " dl"
elif bytes[counter1+1]=="F9":
instruction = "IDIV"
after_instruction = " cl"
elif bytes[counter1+1]=="F8":
instruction = "IDIV"
after_instruction = " al"
elif bytes[counter1+1]=="F7":
instruction = "DIV"
after_instruction = " bh"
elif bytes[counter1+1]=="F6":
instruction = "DIV"
after_instruction = " dh"
elif bytes[counter1+1]=="F5":
instruction = "DIV"
after_instruction = " ch"
elif bytes[counter1+1]=="F4":
instruction = "DIV"
after_instruction = " ah"
elif bytes[counter1+1]=="F3":
instruction = "DIV"
after_instruction = " bl"
elif bytes[counter1+1]=="F2":
instruction = "DIV"
after_instruction = " dl"
elif bytes[counter1+1]=="F1":
instruction = "DIV"
after_instruction = " cl"
elif bytes[counter1+1]=="F0":
instruction = "DIV"
after_instruction = " al"
elif bytes[counter1+1]=="EF":
instruction = "IMUL"
after_instruction = " bh"
elif bytes[counter1+1]=="EE":
instruction = "IMUL"
after_instruction = " dh"
elif bytes[counter1+1]=="ED":
instruction = "IMUL"
after_instruction = " ch"
elif bytes[counter1+1]=="EC":
instruction = "IMUL"
after_instruction = " ah"
elif bytes[counter1+1]=="EB":
instruction = "IMUL"
after_instruction = " bl"
elif bytes[counter1+1]=="EA":
instruction = "IMUL"
after_instruction = " dl"
elif bytes[counter1+1]=="E9":
instruction = "IMUL"
after_instruction = " cl"
elif bytes[counter1+1]=="E8":
instruction = "IMUL"
after_instruction = " al"
elif bytes[counter1+1]=="E7":
instruction = "MUL"
after_instruction = " bh"
elif bytes[counter1+1]=="E6":
instruction = "MUL"
after_instruction = " dh"
elif bytes[counter1+1]=="E5":
instruction = "MUL"
after_instruction = " ch"
elif bytes[counter1+1]=="E4":
instruction = "MUL"
after_instruction = " ah"
elif bytes[counter1+1]=="E3":
instruction = "MUL"
after_instruction = " bl"
elif bytes[counter1+1]=="E2":
instruction = "MUL"
after_instruction = " dl"
elif bytes[counter1+1]=="E1":
instruction = "MUL"
after_instruction = " cl"
elif bytes[counter1+1]=="E0":
instruction = "MUL"
after_instruction = " al"
elif bytes[counter1+1]=="DF":
instruction = "NEG"
after_instruction = " bh"
elif bytes[counter1+1]=="DE":
instruction = "NEG"
after_instruction = " dh"
elif bytes[counter1+1]=="DD":
instruction = "NEG"
after_instruction = " ch"
elif bytes[counter1+1]=="DC":
instruction = "NEG"
after_instruction = " ah"
elif bytes[counter1+1]=="DB":
instruction = "NEG"
after_instruction = " bl"
elif bytes[counter1+1]=="DA":
instruction = "NEG"
after_instruction = " dl"
elif bytes[counter1+1]=="D9":
instruction = "NEG"
after_instruction = " cl"
elif bytes[counter1+1]=="D8":
instruction = "NEG"
after_instruction = " al"
elif bytes[counter1+1]=="D7":
instruction = "NOT"
after_instruction = " bh"
elif bytes[counter1+1]=="D6":
instruction = "NOT"
after_instruction = " dh"
elif bytes[counter1+1]=="D5":
instruction = "NOT"
after_instruction = " ch"
elif bytes[counter1+1]=="D4":
instruction = "NOT"
after_instruction = " ah"
elif bytes[counter1+1]=="D3":
instruction = "NOT"
after_instruction = " bl"
elif bytes[counter1+1]=="D2":
instruction = "NOT"
after_instruction = " dl"
elif bytes[counter1+1]=="D1":
instruction = "NOT"
after_instruction = " cl"
elif bytes[counter1+1]=="D0":
instruction = "NOT"
after_instruction = " al"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "F7": # NOT, NEG, IDIV, DIV, IMUL, MUL
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="FF":
instruction = "IDIV"
after_instruction = " edi"
elif bytes[counter1+1]=="FE":
instruction = "IDIV"
after_instruction = " esi"
elif bytes[counter1+1]=="FD":
instruction = "IDIV"
after_instruction = " ebp"
elif bytes[counter1+1]=="FC":
instruction = "IDIV"
after_instruction = " esp"
elif bytes[counter1+1]=="FB":
instruction = "IDIV"
after_instruction = " ebx"
elif bytes[counter1+1]=="FA":
instruction = "IDIV"
after_instruction = " edx"
elif bytes[counter1+1]=="F9":
instruction = "IDIV"
after_instruction = " ecx"
elif bytes[counter1+1]=="F8":
instruction = "IDIV"
after_instruction = " eax"
elif bytes[counter1+1]=="F7":
instruction = "DIV"
after_instruction = " edi"
elif bytes[counter1+1]=="F6":
instruction = "DIV"
after_instruction = " esi"
elif bytes[counter1+1]=="F5":
instruction = "DIV"
after_instruction = " ebp"
elif bytes[counter1+1]=="F4":
instruction = "DIV"
after_instruction = " esp"
elif bytes[counter1+1]=="F3":
instruction = "DIV"
after_instruction = " ebx"
elif bytes[counter1+1]=="F2":
instruction = "DIV"
after_instruction = " edx"
elif bytes[counter1+1]=="F1":
instruction = "DIV"
after_instruction = " ecx"
elif bytes[counter1+1]=="F0":
instruction = "DIV"
after_instruction = " eax"
elif bytes[counter1+1]=="EF":
instruction = "IMUL"
after_instruction = " edi"
elif bytes[counter1+1]=="EE":
instruction = "IMUL"
after_instruction = " esi"
elif bytes[counter1+1]=="ED":
instruction = "IMUL"
after_instruction = " ebp"
elif bytes[counter1+1]=="EC":
instruction = "IMUL"
after_instruction = " esp"
elif bytes[counter1+1]=="EB":
instruction = "IMUL"
after_instruction = " ebx"
elif bytes[counter1+1]=="EA":
instruction = "IMUL"
after_instruction = " edx"
elif bytes[counter1+1]=="E9":
instruction = "IMUL"
after_instruction = " ecx"
elif bytes[counter1+1]=="E8":
instruction = "IMUL"
after_instruction = " eax"
elif bytes[counter1+1]=="E7":
instruction = "MUL"
after_instruction = " edi"
elif bytes[counter1+1]=="E6":
instruction = "MUL"
after_instruction = " esi"
elif bytes[counter1+1]=="E5":
instruction = "MUL"
after_instruction = " ebp"
elif bytes[counter1+1]=="E4":
instruction = "MUL"
after_instruction = " esp"
elif bytes[counter1+1]=="E3":
instruction = "MUL"
after_instruction = " ebx"
elif bytes[counter1+1]=="E2":
instruction = "MUL"
after_instruction = " edx"
elif bytes[counter1+1]=="E1":
instruction = "MUL"
after_instruction = " ecx"
elif bytes[counter1+1]=="E0":
instruction = "MUL"
after_instruction = " eax"
elif bytes[counter1+1]=="DF":
instruction = "NEG"
after_instruction = " edi"
elif bytes[counter1+1]=="DE":
instruction = "NEG"
after_instruction = " esi"
elif bytes[counter1+1]=="DD":
instruction = "NEG"
after_instruction = " ebp"
elif bytes[counter1+1]=="DC":
instruction = "NEG"
after_instruction = " esp"
elif bytes[counter1+1]=="DB":
instruction = "NEG"
after_instruction = " ebx"
elif bytes[counter1+1]=="DA":
instruction = "NEG"
after_instruction = " edx"
elif bytes[counter1+1]=="D9":
instruction = "NEG"
after_instruction = " ecx"
elif bytes[counter1+1]=="D8":
instruction = "NEG"
after_instruction = " eax"
elif bytes[counter1+1]=="D7":
instruction = "NOT"
after_instruction = " edi"
elif bytes[counter1+1]=="D6":
instruction = "NOT"
after_instruction = " esi"
elif bytes[counter1+1]=="D5":
instruction = "NOT"
after_instruction = " ebp"
elif bytes[counter1+1]=="D4":
instruction = "NOT"
after_instruction = " esp"
elif bytes[counter1+1]=="D3":
instruction = "NOT"
after_instruction = " ebx"
elif bytes[counter1+1]=="D2":
instruction = "NOT"
after_instruction = " edx"
elif bytes[counter1+1]=="D1":
instruction = "NOT"
after_instruction = " ecx"
elif bytes[counter1+1]=="D0":
instruction = "NOT"
after_instruction = " eax"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "FF": # CALL
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="D0":
after_instruction = " eax"
elif bytes[counter1+1]=="D1":
after_instruction = " ecx"
elif bytes[counter1+1]=="D2":
after_instruction = " edx"
elif bytes[counter1+1]=="D3":
after_instruction = " ebx"
elif bytes[counter1+1]=="D4":
after_instruction = " esp"
elif bytes[counter1+1]=="D5":
after_instruction = " ebp"
elif bytes[counter1+1]=="D6":
after_instruction = " esi"
elif bytes[counter1+1]=="D7":
after_instruction = " edi"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(1)
elif i == "0F": # PUSH fs, PUSH gs, POP fs, POP gs, ...
TEMPvar = 1
after_byte = " "+bytes[counter1+1]
if bytes[counter1+1]=="A0":
instruction = "PUSH"
after_instruction = " fs"
elif bytes[counter1+1]=="A1":
instruction = "POP"
after_instruction = " fs"
elif bytes[counter1+1]=="A8":
instruction = "PUSH"
after_instruction = " gs"
elif bytes[counter1+1]=="A9":
instruction = "POP"
after_instruction = " gs"
else:
should_print = False
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.WARNING}"+instruction+after_instruction+f"{bcolors.RESET}"
intruction_len_for_check = 51+len(instruction)+len(after_instruction)
if len(check1) < intruction_len_for_check:
for _ in range(intruction_len_for_check-len(check1)):
after_byte += " "
if isClean:
bcolors = colors
try:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+" "+f"{bcolors.WARNING}"+instruction.split(" ")[1]+after_instruction+f"{bcolors.RESET}"
except:
check1 = f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+after_byte+f"{bcolors.OKGREEN}"+instruction.split(" ")[0]+f"{bcolors.WARNING}"+after_instruction+f"{bcolors.RESET}"
if should_print:
print(check1)
after_byte = ""
after_instruction = ""
if should_print:
cancle_function_iteration(TEMPvar)
else:
pass
#bcolors = colors
#print(f"{bcolors.OKBLUE}"+str(hex(offset1))+" "+f"{bcolors.FAIL}"+to_display+" "+f"{bcolors.WARNING}"+"???"+f"{bcolors.RESET}")
else:
times -= 1
offset1 += 1
counter1 += 1
return True
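# A sketch of the rendered output with colors omitted (hypothetical bytes,
# assuming the handlers above):
#   0x1000  85 C0    TEST eax, eax
#   0x1002  74 05    JE 0x1009
#   0x1004  31 ED    XOR ebp, ebp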
# ---------------------------------------------------------------------------
# Next file: ucloud/services/uddb/schemas/apis.py
# (repo: ucloud/ucloud-sdk-python3, license: Apache-2.0)
# ---------------------------------------------------------------------------
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
from ucloud.core.typesystem import schema, fields
from ucloud.services.uddb.schemas import models
""" UDDB API Schema
"""
"""
API: ChangeUDDBInstanceName
Modify the name of a distributed database (UDDB) middleware instance
"""
class ChangeUDDBInstanceNameRequestSchema(schema.RequestSchema):
"""ChangeUDDBInstanceName - 修改分布式数据库中间件名称"""
fields = {
"NewName": fields.Str(required=True, dump_to="NewName"),
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
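# A minimal usage sketch with hypothetical values; it assumes the core
# typesystem schema exposes a dumps() serializer, which the generated SDK
# clients are wired to call (that wiring is not shown in this file):
#   req = ChangeUDDBInstanceNameRequestSchema().dumps({
#       "Region": "cn-bj2", "Zone": "cn-bj2-02", "ProjectId": "org-xxx",
#       "UDDBId": "uddb-xxxx", "NewName": "new-name",
#   })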
class ChangeUDDBInstanceNameResponseSchema(schema.ResponseSchema):
"""ChangeUDDBInstanceName - 修改分布式数据库中间件名称"""
fields = {}
"""
API: ChangeUDDBSlaveCount
Change the number of read-only instances attached to each data node of the distributed database.
Every UDDB data node handles all write requests. In addition, each data node can be configured with a number of read-only instances. Once a write has completed on the primary node, the read-only instances replay the write so that they stay consistent with the data node.
Read-only instances keep the data in multiple replicas and allow read/write splitting between a data node and its read-only instances; that is, after the primary node has written the data, read operations can be offloaded to the read-only instances, which reduces the load on the primary node and improves performance.
After the read-only instance count has been changed, the new setting applies to every existing and future data node. If the UDDB instance already has data nodes, read-only instances are created or deleted automatically according to the new configuration.
UDDB instances in the following state can perform this operation:
Running: the system is operating normally
When the request succeeds, the state of the UDDB instance changes to "ChangingSlaveCount"; if the request fails, the state stays unchanged. Once the read-only instance count of the data partitions has been changed successfully, the state becomes "Running" (operating normally); if an error occurs during the change, it becomes "Abnormal" (running abnormally) or "Error" (run error).
"""
class ChangeUDDBSlaveCountRequestSchema(schema.RequestSchema):
"""ChangeUDDBSlaveCount - 改变分布式数据库数据节点的只读实例个数
每一个UDDB的数据节点负责处理所有的写入请求。与此同时,每一个数据节点可以配置若干个该节点的只读实例。当主节点的数据写入完毕后,只读实例把这次的写入操作进行更新,从而和数据节点保持一致。
只读实例可以使得数据由多份复制,在数据节点和只读实例之间,可以做请求的读写分离, 也就是说, 主节点写入数据之后, 数据的读操作可以由数据只读实例进行分担, 这样减少主节点的压力, 增加性能
当改变了数据节点的只读实例个数之后,对于现有的和以后的每一个数据节点都采用这个配置。如果UDDB实例有现有的数据节点, 那么它会根据新配置的参数,自动创建或删除数据节点的只读实例
如下状态的UDDB实例可以进行这个操作:
Running: 系统正常运行中
当请求返回成功之后,UDDB实例的状态变成"ChangingSlaveCount"; 如果返回失败, UDDB实例状态保持不变 当UDDB更改数据分区的只读实例个数成功之后, UDDB实例的状态变成"Running"(正常运行中); 如果更改过程中出现异常, 状态变成"Abnormal"(异常运行中)或者"Error"(运行错误)
"""
fields = {
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"SlaveCount": fields.Str(required=True, dump_to="SlaveCount"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class ChangeUDDBSlaveCountResponseSchema(schema.ResponseSchema):
"""ChangeUDDBSlaveCount - 改变分布式数据库数据节点的只读实例个数
每一个UDDB的数据节点负责处理所有的写入请求。与此同时,每一个数据节点可以配置若干个该节点的只读实例。当主节点的数据写入完毕后,只读实例把这次的写入操作进行更新,从而和数据节点保持一致。
只读实例可以使得数据由多份复制,在数据节点和只读实例之间,可以做请求的读写分离, 也就是说, 主节点写入数据之后, 数据的读操作可以由数据只读实例进行分担, 这样减少主节点的压力, 增加性能
当改变了数据节点的只读实例个数之后,对于现有的和以后的每一个数据节点都采用这个配置。如果UDDB实例有现有的数据节点, 那么它会根据新配置的参数,自动创建或删除数据节点的只读实例
如下状态的UDDB实例可以进行这个操作:
Running: 系统正常运行中
当请求返回成功之后,UDDB实例的状态变成"ChangingSlaveCount"; 如果返回失败, UDDB实例状态保持不变 当UDDB更改数据分区的只读实例个数成功之后, UDDB实例的状态变成"Running"(正常运行中); 如果更改过程中出现异常, 状态变成"Abnormal"(异常运行中)或者"Error"(运行错误)
"""
fields = {}
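# Illustrative only (not part of the generated file): a minimal sketch of how
# a request schema like the one above is typically consumed -- a dict of
# parameters is dumped through the schema before being sent. The client
# object, its invoke() call, and the dumps() method name are assumptions for
# illustration, not confirmed SDK API.
#
#   params = {
#       "ProjectId": "org-xxx",
#       "Region": "cn-bj2",
#       "Zone": "cn-bj2-04",
#       "UDDBId": "uddb-xxx",
#       "SlaveCount": "2",
#   }
#   req = ChangeUDDBSlaveCountRequestSchema().dumps(params)
#   resp = client.invoke("ChangeUDDBSlaveCount", req)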
"""
API: CreateUDDBInstance
Create a distributed database instance (UDDB instance for short).
"""
class CreateUDDBInstanceRequestSchema(schema.RequestSchema):
"""CreateUDDBInstance - 创建创建分布式数据库UDDB实例, 简称UDDB实例。"""
fields = {
"AdminPassword": fields.Str(required=True, dump_to="AdminPassword"),
"AdminUser": fields.Str(required=False, dump_to="AdminUser"),
"ChargeType": fields.Str(required=False, dump_to="ChargeType"),
"CouponId": fields.Str(required=False, dump_to="CouponId"),
"DBTypeId": fields.Str(required=True, dump_to="DBTypeId"),
"DataNodeCount": fields.Int(required=True, dump_to="DataNodeCount"),
"DataNodeDiskSpace": fields.Int(
required=True, dump_to="DataNodeDiskSpace"
),
"DataNodeMemory": fields.Int(required=True, dump_to="DataNodeMemory"),
"DataNodeSlaveCount": fields.Int(
required=False, dump_to="DataNodeSlaveCount"
),
"InstanceMode": fields.Str(required=False, dump_to="InstanceMode"),
"InstanceType": fields.Str(required=False, dump_to="InstanceType"),
"Name": fields.Str(required=True, dump_to="Name"),
"Port": fields.Int(required=False, dump_to="Port"),
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Quantity": fields.Int(required=False, dump_to="Quantity"),
"Region": fields.Str(required=True, dump_to="Region"),
"RouterNodeNum": fields.Int(required=True, dump_to="RouterNodeNum"),
"RouterVersion": fields.Str(required=True, dump_to="RouterVersion"),
"SubnetId": fields.Str(required=False, dump_to="SubnetId"),
"VPCId": fields.Str(required=False, dump_to="VPCId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class CreateUDDBInstanceResponseSchema(schema.ResponseSchema):
"""CreateUDDBInstance - 创建创建分布式数据库UDDB实例, 简称UDDB实例。"""
fields = {
"Message": fields.Str(required=False, load_from="Message"),
"UDDBId": fields.Str(required=False, load_from="UDDBId"),
}
"""
API: DeleteUDDBInstance
Delete a UDDB instance.
UDDB instances in the following states can perform this operation:
InitFail: initialization failed
Shutoff: shut down
When the request returns success, the UDDB instance has been deleted and no longer appears in the instance list
"""
class DeleteUDDBInstanceRequestSchema(schema.RequestSchema):
"""DeleteUDDBInstance - 删除UDDB实例。
如下状态的UDDB实例可以进行这个操作:
InitFail: 初始化失败
Shutoff: 已关闭
当请求返回成功之后,UDDB实例就已经被删除, 列表上看不到对应的UDDB实例
"""
fields = {
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class DeleteUDDBInstanceResponseSchema(schema.ResponseSchema):
"""DeleteUDDBInstance - 删除UDDB实例。
如下状态的UDDB实例可以进行这个操作:
InitFail: 初始化失败
Shutoff: 已关闭
当请求返回成功之后,UDDB实例就已经被删除, 列表上看不到对应的UDDB实例
"""
fields = {
"Message": fields.Str(required=True, load_from="Message"),
}
"""
API: DescribeUDDBInstance
Get detailed information about a distributed database (UDDB) instance
"""
class DescribeUDDBInstanceRequestSchema(schema.RequestSchema):
"""DescribeUDDBInstance - 获取分布式数据库UDDB的详细信息"""
fields = {
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class DescribeUDDBInstanceResponseSchema(schema.ResponseSchema):
"""DescribeUDDBInstance - 获取分布式数据库UDDB的详细信息"""
fields = {
"DataSet": fields.List(
models.DataSetUDDBSchema(), required=False, load_from="DataSet"
),
"Message": fields.Str(required=False, load_from="Message"),
}
"""
API: DescribeUDDBInstancePrice
Get the price of a distributed database (UDDB) instance
"""
class DescribeUDDBInstancePriceRequestSchema(schema.RequestSchema):
"""DescribeUDDBInstancePrice - 获取分布式数据库UDDB价格"""
fields = {
"ChargeType": fields.Str(required=False, dump_to="ChargeType"),
"DataNodeCount": fields.Int(required=True, dump_to="DataNodeCount"),
"DataNodeDiskSpace": fields.Int(
required=True, dump_to="DataNodeDiskSpace"
),
"DataNodeMemory": fields.Str(required=True, dump_to="DataNodeMemory"),
"DataNodeSlaveCount": fields.Int(
required=False, dump_to="DataNodeSlaveCount"
),
"InstanceMode": fields.Str(required=False, dump_to="InstanceMode"),
"InstanceType": fields.Str(required=False, dump_to="InstanceType"),
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Quantity": fields.Int(required=False, dump_to="Quantity"),
"Region": fields.Str(required=True, dump_to="Region"),
"RouterNodeNum": fields.Int(required=True, dump_to="RouterNodeNum"),
"RouterVersion": fields.Str(required=True, dump_to="RouterVersion"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class DescribeUDDBInstancePriceResponseSchema(schema.ResponseSchema):
"""DescribeUDDBInstancePrice - 获取分布式数据库UDDB价格"""
fields = {
"Message": fields.Str(required=False, load_from="Message"),
"PriceInfo": models.PriceDetailInfoSchema(),
}
"""
API: DescribeUDDBInstanceUpgradePrice
Get the post-upgrade price when upgrading a UDDB instance
"""
class DescribeUDDBInstanceUpgradePriceRequestSchema(schema.RequestSchema):
"""DescribeUDDBInstanceUpgradePrice - 升级UDDB时,获取升级后的价格"""
fields = {
"DataNodeCount": fields.Int(required=False, dump_to="DataNodeCount"),
"DataNodeDiskSpace": fields.Int(
required=False, dump_to="DataNodeDiskSpace"
),
"DataNodeMemory": fields.Int(required=False, dump_to="DataNodeMemory"),
"DataNodeSlaveCount": fields.Int(
required=False, dump_to="DataNodeSlaveCount"
),
"InstanceMode": fields.Str(required=False, dump_to="InstanceMode"),
"InstanceType": fields.Str(required=False, dump_to="InstanceType"),
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"RouterNodeNum": fields.Int(required=True, dump_to="RouterNodeNum"),
"RouterVersion": fields.Str(required=True, dump_to="RouterVersion"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class DescribeUDDBInstanceUpgradePriceResponseSchema(schema.ResponseSchema):
"""DescribeUDDBInstanceUpgradePrice - 升级UDDB时,获取升级后的价格"""
fields = {
"Message": fields.Str(required=False, load_from="Message"),
"PriceInfo": models.PriceInfoSchema(),
}
"""
API: RestartUDDBInstance
Restart a UDDB instance so that it resumes service.
UDDB instances in the following states can perform this operation:
Running: running normally
Abnormal: running abnormally
When the request returns success, the UDDB instance's state becomes "Starting"; if it returns failure, the state is unchanged. Once the restarting instance starts up successfully, its state becomes "Running" (running normally); if an error occurs during startup, the state becomes "Abnormal" (running abnormally) or "Shutoff" (shut down)
"""
class RestartUDDBInstanceRequestSchema(schema.RequestSchema):
"""RestartUDDBInstance - 重启UDDB实例,开始提供服务。
如下状态的UDDB实例可以进行这个操作:
Running: 正常运行中
Abnormal: 异常运行中
当请求返回成功之后,UDDB实例的状态变成"Starting"(启动中); 如果返回失败, UDDB实例状态保持不变 UDDB实例在重启过程中, 当UDDB实例启动成功之后, UDDB实例的状态变成"Running"(正常运行中); 如果启动过程中出现异常, 状态变成"Abnormal"(异常运行中), 或者"Shutoff"(已关闭
"""
fields = {
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class RestartUDDBInstanceResponseSchema(schema.ResponseSchema):
"""RestartUDDBInstance - 重启UDDB实例,开始提供服务。
如下状态的UDDB实例可以进行这个操作:
Running: 正常运行中
Abnormal: 异常运行中
当请求返回成功之后,UDDB实例的状态变成"Starting"(启动中); 如果返回失败, UDDB实例状态保持不变 UDDB实例在重启过程中, 当UDDB实例启动成功之后, UDDB实例的状态变成"Running"(正常运行中); 如果启动过程中出现异常, 状态变成"Abnormal"(异常运行中), 或者"Shutoff"(已关闭
"""
fields = {
"Message": fields.Str(required=True, load_from="Message"),
}
"""
API: StartUDDBInstance
Start a UDDB instance so that it begins providing service.
UDDB instances in the following state can perform this operation:
Shutoff: shut down
When the request returns success, the UDDB instance's state becomes "Starting"; if it returns failure, the state is unchanged. Once the starting instance starts up successfully, its state becomes "Running" (running normally); if an error occurs during startup, the state becomes "Abnormal" (running abnormally) or "Shutoff" (shut down)
"""
class StartUDDBInstanceRequestSchema(schema.RequestSchema):
"""StartUDDBInstance - 启动UDDB实例,开始提供服务。
如下状态的UDDB实例可以进行这个操作:
Shutoff: 已关闭
当请求返回成功之后,UDDB实例的状态变成"Starting"(启动中); 如果返回失败, UDDB实例状态保持不变 UDDB实例在启动过程中, 当UDDB实例启动成功之后, UDDB实例的状态变成"Running"(正常运行中); 如果启动过程中出现异常, 状态变成"Abnormal"(异常运行中), 或者"Shutoff"(已关闭)
"""
fields = {
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class StartUDDBInstanceResponseSchema(schema.ResponseSchema):
"""StartUDDBInstance - 启动UDDB实例,开始提供服务。
如下状态的UDDB实例可以进行这个操作:
Shutoff: 已关闭
当请求返回成功之后,UDDB实例的状态变成"Starting"(启动中); 如果返回失败, UDDB实例状态保持不变 UDDB实例在启动过程中, 当UDDB实例启动成功之后, UDDB实例的状态变成"Running"(正常运行中); 如果启动过程中出现异常, 状态变成"Abnormal"(异常运行中), 或者"Shutoff"(已关闭)
"""
fields = {
"Message": fields.Str(required=True, load_from="Message"),
}
"""
API: StopUDDBInstance
Shut down a UDDB instance so that it stops providing service.
UDDB instances in the following states can perform this operation:
Running: running normally
Abnormal: running abnormally
When the request returns success, the UDDB instance's state becomes "Shutdown" (shutting down); if it returns failure, the state is unchanged. Once the instance shuts down successfully, its state becomes "Shutoff" (shut down); if an error occurs during shutdown, depending on the instance the state becomes "Abnormal" (running abnormally) or "Running" (running normally)
"""
class StopUDDBInstanceRequestSchema(schema.RequestSchema):
"""StopUDDBInstance - 关闭UDDB实例,停止提供服务。
如下状态的UDDB实例可以进行这个操作:
Running: 正常运行中
Abnormal: 异常运行中
当请求返回成功之后,UDDB实例的状态变成"Shutdown"(关闭中); 如果返回失败, UDDB实例状态保持不变 UDDB实例在关闭过程中, 当UDDB实例关闭成功之后, UDDB实例的状态变成"Shutoff"(已关闭); 如果关闭过程中出现异常, 根据UDDB实例的情况, 状态变成"Abnormal"(异常运行中), 或者"Running"(正常运行中)
"""
fields = {
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=True, dump_to="Zone"),
}
class StopUDDBInstanceResponseSchema(schema.ResponseSchema):
"""StopUDDBInstance - 关闭UDDB实例,停止提供服务。
如下状态的UDDB实例可以进行这个操作:
Running: 正常运行中
Abnormal: 异常运行中
当请求返回成功之后,UDDB实例的状态变成"Shutdown"(关闭中); 如果返回失败, UDDB实例状态保持不变 UDDB实例在关闭过程中, 当UDDB实例关闭成功之后, UDDB实例的状态变成"Shutoff"(已关闭); 如果关闭过程中出现异常, 根据UDDB实例的情况, 状态变成"Abnormal"(异常运行中), 或者"Running"(正常运行中)
"""
fields = {
"Message": fields.Str(required=True, load_from="Message"),
}
"""
API: UpgradeUDDBDataNode
Upgrade or downgrade the configuration of the data nodes of a distributed database, raising or lowering their storage capacity and memory
The configuration of every data node, and of the read-only instances attached to it, is affected
After the change, billing is recalculated from the data nodes' new disk and memory sizes
Data node instances in the following state can perform this operation:
Shutoff: shut down
When the request returns success, the UDDB instance's state becomes "UpgradingDataNode" and the affected data nodes' state becomes "Upgrading"; if it returns failure, the state is unchanged. When the upgrade finishes, the UDDB instance's state becomes "Shutoff"
"""
class UpgradeUDDBDataNodeRequestSchema(schema.RequestSchema):
"""UpgradeUDDBDataNode - 升降级分布式数据库数据节点的配置, 提高/降低数据节点的数据容量和内存
所有数据节点以及其所挂载的只读实例的配置都受到影响
升降级数据节点的配置之后之后, 会按照数据节点新的磁盘和内存大小重新计费
如下状态的数据节点实例可以进行这个操作:
Shutoff: 已关闭
当请求返回成功之后,UDDB实例的状态变成"UpgradingDataNode",相关数据节点的状态变成"Upgrading"; 如果返回失败, UDDB实例状态保持不变 当UDDB实例升级结束之后, UDDB实例的状态变成"Shutoff"
"""
fields = {
"CouponId": fields.Str(required=False, dump_to="CouponId"),
"DataNodeDiskSpace": fields.Int(
required=True, dump_to="DataNodeDiskSpace"
),
"DataNodeMemory": fields.Int(required=True, dump_to="DataNodeMemory"),
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=False, dump_to="Zone"),
}
class UpgradeUDDBDataNodeResponseSchema(schema.ResponseSchema):
"""UpgradeUDDBDataNode - 升降级分布式数据库数据节点的配置, 提高/降低数据节点的数据容量和内存
所有数据节点以及其所挂载的只读实例的配置都受到影响
升降级数据节点的配置之后之后, 会按照数据节点新的磁盘和内存大小重新计费
如下状态的数据节点实例可以进行这个操作:
Shutoff: 已关闭
当请求返回成功之后,UDDB实例的状态变成"UpgradingDataNode",相关数据节点的状态变成"Upgrading"; 如果返回失败, UDDB实例状态保持不变 当UDDB实例升级结束之后, UDDB实例的状态变成"Shutoff"
"""
fields = {
"Message": fields.Str(required=True, load_from="Message"),
}
"""
API: UpgradeUDDBInstance
Upgrade or downgrade the configuration of the distributed database middleware, raising or lowering request-handling concurrency
After the number of request-handling nodes is changed, billing is recalculated from the total memory and CPU cores of all request-handling nodes
UDDB instances in the following state can perform this operation:
Running: the system is running normally
When the request returns success, the UDDB instance's state becomes "UpgradingUDDB"; if it returns failure, the state is unchanged. When the upgrade succeeds, the state becomes "Running"; if an error occurs during the change, the state becomes "Abnormal" or "Error"
"""
class UpgradeUDDBInstanceRequestSchema(schema.RequestSchema):
"""UpgradeUDDBInstance - 升降级分布式数据库中间件的配置, 提高/降低请求处理的并发性
修改请求处理节点个数之后, 按照所有请求处理节点的总内存容量和CPU核数重新计费
如下状态的UDDB实例可以进行这个操作:
Running: 系统正常运行中
当请求返回成功之后,UDDB实例的状态变成"UpgradingUDDB"; 如果返回失败, UDDB实例状态保持不变 当UDDB实例升级成功之后, UDDB实例的状态变成"Running"; 如果更改过程中出现异常, 状态变成"Abnormal", 或者"Error"
"""
fields = {
"CouponId": fields.Str(required=False, dump_to="CouponId"),
"ProjectId": fields.Str(required=True, dump_to="ProjectId"),
"Region": fields.Str(required=True, dump_to="Region"),
"RouterNodeNum": fields.Int(required=True, dump_to="RouterNodeNum"),
"RouterVersion": fields.Str(required=True, dump_to="RouterVersion"),
"UDDBId": fields.Str(required=True, dump_to="UDDBId"),
"Zone": fields.Str(required=False, dump_to="Zone"),
}
class UpgradeUDDBInstanceResponseSchema(schema.ResponseSchema):
"""UpgradeUDDBInstance - 升降级分布式数据库中间件的配置, 提高/降低请求处理的并发性
修改请求处理节点个数之后, 按照所有请求处理节点的总内存容量和CPU核数重新计费
如下状态的UDDB实例可以进行这个操作:
Running: 系统正常运行中
当请求返回成功之后,UDDB实例的状态变成"UpgradingUDDB"; 如果返回失败, UDDB实例状态保持不变 当UDDB实例升级成功之后, UDDB实例的状态变成"Running"; 如果更改过程中出现异常, 状态变成"Abnormal", 或者"Error"
"""
fields = {
"Message": fields.Str(required=True, load_from="Message"),
}
| 32.507663
| 186
| 0.715187
| 1,526
| 16,969
| 7.885976
| 0.120577
| 0.044873
| 0.114426
| 0.097225
| 0.801479
| 0.801479
| 0.775553
| 0.766661
| 0.751371
| 0.729683
| 0
| 0
| 0.155578
| 16,969
| 521
| 187
| 32.570058
| 0.839835
| 0.262597
| 0
| 0.65625
| 1
| 0
| 0.185448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.005208
| 0.010417
| 0
| 0.260417
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
888169dfc6c081359c25cb16582871351d932fa4
| 12,240
|
py
|
Python
|
resources/test_cases/python/M2Crypto/python-code-test.py
|
stg-tud/licma
|
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
|
[
"MIT"
] | 5
|
2021-09-13T11:24:13.000Z
|
2022-03-18T21:56:58.000Z
|
resources/test_cases/python/M2Crypto/python-code-test.py
|
stg-tud/licma
|
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
|
[
"MIT"
] | null | null | null |
resources/test_cases/python/M2Crypto/python-code-test.py
|
stg-tud/licma
|
b899e6e682f7716d19e79d6ce7b73c28c6efd4cf
|
[
"MIT"
] | 1
|
2021-09-13T06:02:20.000Z
|
2021-09-13T06:02:20.000Z
|
from M2Crypto.EVP import Cipher
from M2Crypto.EVP import pbkdf2
import TestRule1
import TestRule2
import TestRule3
import TestRule4
import TestRule5
encryption_mode = 1
decryption_mode = 0
key = b"12345678123456781234567812345678"
iv_ecb = b"0000000000000000"
iv_cbc = b"1234567812345678"
password = b"12345678"
salt = b"12345678"
iter_eq_1000 = 1000
iter_eq_999 = 999
algorithm = "aes_256_ecb"
plaintext = b"abcdefghijklmnop"
def decrypt_aes_ecb(key, data):
    # AES-256-ECB decryption; M2Crypto ignores the IV argument in ECB mode
    cipher = Cipher("aes_256_ecb", key, iv_ecb, decryption_mode)
    plain_text = cipher.update(data) + cipher.final()
    return plain_text
def decrypt_aes_cbc(key, iv, data):
    # AES-256-CBC decryption with an explicit IV
    cipher = Cipher("aes_256_cbc", key, iv, decryption_mode)
    plain_text = cipher.update(data) + cipher.final()
    return plain_text
def get_pbk(salt, iterations):
    # derive a 32-byte AES-256 key from the module-level password via PBKDF2
    key = pbkdf2(password, salt, iterations, 32)
    return key
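# Sketch (ours, not in the original test file): the encryption counterpart to
# the decrypt helpers above, using the same module-level key/IV conventions,
# so a round trip encrypt_aes_ecb -> decrypt_aes_ecb recovers the plaintext.
def encrypt_aes_ecb(key, data):
    cipher = Cipher("aes_256_ecb", key, iv_ecb, encryption_mode)
    return cipher.update(data) + cipher.final()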
if __name__ == '__main__':
# TestRule1 code
print("M2Crypto -> rule1 -> p_example1_hard_coded1:", decrypt_aes_ecb(key, TestRule1.p_example1_hard_coded(key, plaintext)) == plaintext)
print("M2Crypto -> rule1 -> p_example2_local_variable1:", decrypt_aes_ecb(key, TestRule1.p_example2_local_variable(key, plaintext)) == plaintext)
print("M2Crypto -> rule1 -> p_example3_nested_local_variable1:", decrypt_aes_ecb(key, TestRule1.p_example3_nested_local_variable(key, plaintext)) == plaintext)
print("M2Crypto -> rule1 -> p_example4_direct_method_call1:", decrypt_aes_ecb(key, TestRule1.p_example4_direct_method_call(key, plaintext)) == plaintext)
print("M2Crypto -> rule1 -> p_example5_nested_method_call1:", decrypt_aes_ecb(key, TestRule1.p_example5_nested_method_call(key, plaintext)) == plaintext)
print("M2Crypto -> rule1 -> p_example6_direct_g_variable_access1:", decrypt_aes_ecb(key, TestRule1.p_example6_direct_g_variable_access(key, plaintext)) == plaintext)
print("M2Crypto -> rule1 -> p_example7_indirect_g_variable_access1:", decrypt_aes_ecb(key, TestRule1.p_example7_indirect_g_variable_access(key, plaintext)) == plaintext)
print("M2Crypto -> rule1 -> p_example8_warning_parameter_not_resolvable:", decrypt_aes_ecb(key, TestRule1.p_example8_warning_parameter_not_resolvable(key, plaintext, algorithm)) == plaintext)
print("M2Crypto -> rule1 -> n_example1_cbc:", TestRule1.n_example1_cbc(key, iv_cbc, plaintext))
# TestRule2 code
print("M2Crypto -> rule2 -> p_example1_hard_coded1:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example1_hard_coded1(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example2_hard_coded2:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example2_hard_coded2(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example3_local_variable1:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example3_local_variable1(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example4_local_variable2:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example4_local_variable2(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example5_nested_local_variable1:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example5_nested_local_variable1(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example6_nested_local_variable2:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example6_nested_local_variable2(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example7_direct_method_call1:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example7_direct_method_call1(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example8_direct_method_call2:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example8_direct_method_call2(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example9_nested_method_call1:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example9_nested_method_call1(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example10_nested_method_call2:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example10_nested_method_call2(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example11_direct_g_variable_access1:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example11_direct_g_variable_access1(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example12_direct_g_variable_access2:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example12_direct_g_variable_access2(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example13_indirect_g_variable_access1:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example13_indirect_g_variable_access1(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example14_indirect_g_variable_access2:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example14_indirect_g_variable_access2(key, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> p_example15_warning_parameter_not_resolvable:", decrypt_aes_cbc(key, iv_cbc, TestRule2.p_example15_warning_parameter_not_resolvable(key, iv_cbc, plaintext)) == plaintext)
print("M2Crypto -> rule2 -> n_example1_secrets_system_random:", TestRule2.n_example1_secrets_system_random(key, plaintext))
# TestRule3 code
print("M2Crypto -> rule3 -> p_example1_hard_coded1:", decrypt_aes_ecb(key, TestRule3.p_example1_hard_coded1(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example2_hard_coded2:", decrypt_aes_ecb(key, TestRule3.p_example2_hard_coded2(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example3_local_variable1:", decrypt_aes_ecb(key, TestRule3.p_example3_local_variable1(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example4_local_variable2:", decrypt_aes_ecb(key, TestRule3.p_example4_local_variable2(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example5_nested_local_variable1:", decrypt_aes_ecb(key, TestRule3.p_example5_nested_local_variable1(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example6_nested_local_variable2:", decrypt_aes_ecb(key, TestRule3.p_example6_nested_local_variable2(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example7_direct_method_call1:", decrypt_aes_ecb(key, TestRule3.p_example7_direct_method_call1(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example8_direct_method_call2:", decrypt_aes_ecb(key, TestRule3.p_example8_direct_method_call2(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example9_nested_method_call1:", decrypt_aes_ecb(key, TestRule3.p_example9_nested_method_call1(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example10_nested_method_call2:", decrypt_aes_ecb(key, TestRule3.p_example10_nested_method_call2(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example11_direct_g_variable_access1:", decrypt_aes_ecb(key, TestRule3.p_example11_direct_g_variable_access1(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example12_direct_g_variable_access2:", decrypt_aes_ecb(key, TestRule3.p_example12_direct_g_variable_access2(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example13_indirect_g_variable_access1:", decrypt_aes_ecb(key, TestRule3.p_example13_indirect_g_variable_access1(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example14_indirect_g_variable_access2:", decrypt_aes_ecb(key, TestRule3.p_example14_indirect_g_variable_access2(plaintext)) == plaintext)
print("M2Crypto -> rule3 -> p_example15_warning_parameter_not_resolvable:", decrypt_aes_ecb(key, TestRule3.p_example15_warning_parameter_not_resolvable(key, plaintext)) == plaintext)
print("M2Crypto -> rule3 -> n_example1_random_key:", TestRule3.n_example1_random_key(plaintext))
# TestRule4 code
print("M2Crypto -> rule4 -> p_example1_hard_coded1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example1_hard_coded1(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example2_hard_coded2:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example2_hard_coded2(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example3_local_variable1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example3_local_variable1(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example4_local_variable2:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example4_local_variable2(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example5_nested_local_variable1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example5_nested_local_variable1(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example6_nested_local_variable2:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example6_nested_local_variable2(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example7_direct_method_call1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example7_direct_method_call1(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example8_direct_method_call2:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example8_direct_method_call2(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example9_nested_method_call1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example9_nested_method_call1(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example10_nested_method_call2:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example10_nested_method_call2(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example11_direct_g_variable_access1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example11_direct_g_variable_access1(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example12_direct_g_variable_access2:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example12_direct_g_variable_access2(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example13_indirect_g_variable_access1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example13_indirect_g_variable_access1(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example14_indirect_g_variable_access2:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example14_indirect_g_variable_access2(password, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> p_example15_warning_parameter_not_resolvable:", decrypt_aes_ecb(get_pbk(salt, iter_eq_1000), TestRule4.p_example15_warning_parameter_not_resolvable(password, salt, plaintext)) == plaintext)
print("M2Crypto -> rule4 -> n_example1_random_salt:", TestRule4.n_example1_random_salt(password, plaintext))
# TestRule5 code
print("M2Crypto -> rule5 -> p_example1_hard_coded1:", decrypt_aes_ecb(get_pbk(salt, iter_eq_999), TestRule5.p_example1_hard_coded(password, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> p_example2_local_variable:", decrypt_aes_ecb(get_pbk(salt, iter_eq_999), TestRule5.p_example2_local_variable(password, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> p_example3_nested_local_variable:", decrypt_aes_ecb(get_pbk(salt, iter_eq_999), TestRule5.p_example3_nested_local_variable(password, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> p_example4_direct_method_call:", decrypt_aes_ecb(get_pbk(salt, iter_eq_999), TestRule5.p_example4_direct_method_call(password, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> p_example5_nested_method_call:", decrypt_aes_ecb(get_pbk(salt, iter_eq_999), TestRule5.p_example5_nested_method_call(password, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> p_example6_direct_g_variable_access:", decrypt_aes_ecb(get_pbk(salt, iter_eq_999), TestRule5.p_example6_direct_g_variable_access(password, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> p_example7_indirect_g_variable_access:", decrypt_aes_ecb(get_pbk(salt, iter_eq_999), TestRule5.p_example7_indirect_g_variable_access(password, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> p_example8_warning_parameter_not_resolvable:", decrypt_aes_ecb(get_pbk(salt, 1000), TestRule5.p_example8_warning_parameter_not_resolvable(password, iter_eq_1000, plaintext)) == plaintext)
print("M2Crypto -> rule5 -> n_example1_iterations_eq_1000:", TestRule5.n_example1_iterations_eq_1000(password, plaintext))
| 106.434783
| 221
| 0.79518
| 1,636
| 12,240
| 5.477384
| 0.061125
| 0.095748
| 0.14976
| 0.207566
| 0.874902
| 0.795001
| 0.728825
| 0.670349
| 0.523937
| 0.298181
| 0
| 0.06077
| 0.091176
| 12,240
| 114
| 222
| 107.368421
| 0.744786
| 0.006046
| 0
| 0.041667
| 0
| 0
| 0.300493
| 0.176645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0.28125
| 0.072917
| 0
| 0.135417
| 0.6875
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 7
|
88b6746a1cf99beaf1faf0dfc28adddc1b47eaff
| 10,819
|
py
|
Python
|
analysis/webservice/algorithms/colortables.py
|
tloubrieu-jpl/incubator-sdap-nexus
|
5bf903f04f12eb27f25ea2aa738c617ca404a87b
|
[
"Apache-2.0"
] | 17
|
2017-11-16T07:36:33.000Z
|
2021-11-07T00:02:20.000Z
|
analysis/webservice/algorithms/colortables.py
|
ifenty/incubator-sdap-nexus
|
3059c66f53d3f3d24c74d557c7632bdcc7f1eeec
|
[
"Apache-2.0"
] | 35
|
2018-01-11T00:50:20.000Z
|
2022-03-17T23:08:07.000Z
|
analysis/webservice/algorithms/colortables.py
|
ifenty/incubator-sdap-nexus
|
3059c66f53d3f3d24c74d557c7632bdcc7f1eeec
|
[
"Apache-2.0"
] | 25
|
2017-11-16T07:36:38.000Z
|
2022-02-03T20:48:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
grayscale = [
[0, 0, 0],
[255, 255, 255]
]
oceanography = [
[2, 3, 206],
[143, 226, 255],
[255, 255, 255],
[255, 241, 27],
[253, 0, 0]
]
rainbow = [
[125, 0, 255],
[0, 0, 255],
[0, 255, 0],
[255, 255, 0],
[255, 125, 0],
[255, 0, 0]
]
anomaly = [
[129, 31, 240],
[124, 30, 240],
[119, 29, 241],
[114, 27, 242],
[108, 26, 242],
[103, 24, 243],
[97, 23, 244],
[91, 21, 245],
[86, 20, 245],
[80, 18, 246],
[73, 17, 247],
[67, 15, 247],
[61, 14, 248],
[55, 12, 249],
[48, 11, 250],
[42, 9, 250],
[35, 7, 251],
[28, 6, 252],
[21, 4, 252],
[14, 3, 253],
[7, 1, 254],
[0, 0, 255],
[3, 6, 253],
[6, 12, 252],
[10, 18, 250],
[13, 24, 249],
[17, 30, 247],
[20, 36, 246],
[23, 42, 245],
[26, 47, 243],
[29, 52, 242],
[33, 57, 240],
[36, 62, 239],
[39, 67, 237],
[42, 72, 236],
[45, 76, 235],
[48, 81, 233],
[51, 85, 232],
[53, 89, 230],
[56, 93, 229],
[59, 97, 227],
[62, 101, 226],
[65, 105, 225],
[63, 106, 226],
[62, 107, 227],
[60, 108, 229],
[59, 110, 230],
[57, 111, 232],
[56, 113, 233],
[54, 115, 235],
[52, 116, 236],
[51, 118, 237],
[49, 120, 239],
[47, 122, 240],
[46, 123, 242],
[44, 125, 243],
[42, 127, 245],
[41, 130, 246],
[39, 132, 247],
[37, 134, 249],
[35, 136, 250],
[33, 139, 252],
[31, 141, 253],
[29, 144, 255],
[28, 145, 255],
[27, 147, 255],
[25, 149, 255],
[24, 151, 255],
[22, 153, 255],
[21, 155, 255],
[19, 158, 255],
[18, 160, 255],
[17, 162, 255],
[15, 164, 255],
[14, 166, 255],
[12, 169, 255],
[11, 171, 255],
[9, 173, 255],
[8, 176, 255],
[7, 178, 255],
[5, 180, 255],
[4, 183, 255],
[2, 185, 255],
[1, 188, 255],
[0, 191, 255],
[7, 191, 254],
[14, 191, 253],
[21, 191, 252],
[28, 191, 251],
[35, 192, 251],
[41, 192, 250],
[48, 193, 249],
[55, 193, 248],
[62, 194, 248],
[69, 194, 247],
[75, 195, 246],
[82, 196, 245],
[88, 196, 245],
[95, 197, 244],
[101, 198, 243],
[108, 199, 242],
[114, 200, 242],
[121, 201, 241],
[127, 202, 240],
[133, 203, 239],
[140, 205, 239],
[143, 206, 239],
[146, 208, 239],
[149, 209, 240],
[152, 211, 240],
[155, 212, 241],
[158, 214, 241],
[161, 215, 242],
[164, 217, 242],
[168, 218, 243],
[171, 220, 243],
[174, 221, 244],
[177, 222, 244],
[180, 224, 245],
[184, 225, 245],
[187, 227, 246],
[190, 228, 246],
[193, 230, 247],
[197, 231, 247],
[200, 233, 248],
[203, 234, 248],
[207, 236, 249],
[255, 255, 200],
[255, 254, 192],
[255, 254, 185],
[255, 253, 178],
[255, 252, 171],
[255, 251, 164],
[255, 250, 157],
[255, 249, 149],
[255, 248, 142],
[255, 247, 135],
[255, 246, 128],
[255, 244, 121],
[255, 243, 114],
[255, 241, 107],
[255, 239, 99],
[255, 238, 92],
[255, 236, 85],
[255, 234, 78],
[255, 231, 71],
[255, 229, 64],
[255, 227, 57],
[255, 225, 49],
[255, 222, 47],
[255, 220, 45],
[255, 218, 42],
[255, 215, 40],
[255, 213, 38],
[255, 211, 35],
[255, 208, 33],
[255, 206, 30],
[255, 203, 28],
[255, 201, 26],
[255, 198, 23],
[255, 195, 21],
[255, 193, 19],
[255, 190, 16],
[255, 187, 14],
[255, 184, 11],
[255, 181, 9],
[255, 179, 7],
[255, 176, 4],
[255, 173, 2],
[255, 170, 0],
[255, 167, 0],
[255, 164, 0],
[255, 161, 0],
[255, 158, 0],
[255, 155, 0],
[255, 152, 0],
[255, 149, 0],
[255, 147, 0],
[255, 144, 0],
[255, 141, 0],
[255, 138, 0],
[255, 135, 0],
[255, 132, 0],
[255, 129, 0],
[255, 127, 0],
[255, 124, 0],
[255, 121, 0],
[255, 118, 0],
[255, 115, 0],
[255, 112, 0],
[255, 110, 0],
[255, 104, 0],
[255, 99, 0],
[255, 94, 0],
[255, 89, 0],
[255, 83, 0],
[255, 78, 0],
[255, 73, 0],
[255, 68, 0],
[255, 62, 0],
[255, 57, 0],
[255, 52, 0],
[255, 47, 0],
[255, 41, 0],
[255, 36, 0],
[255, 31, 0],
[255, 26, 0],
[255, 20, 0],
[255, 15, 0],
[255, 10, 0],
[255, 5, 0],
[255, 0, 0],
[252, 0, 0],
[249, 0, 0],
[247, 0, 0],
[244, 0, 0],
[241, 0, 0],
[239, 0, 0],
[236, 0, 0],
[234, 0, 0],
[231, 0, 0],
[228, 0, 0],
[226, 0, 0],
[223, 0, 0],
[220, 0, 0],
[218, 0, 0],
[215, 0, 0],
[213, 0, 0],
[210, 0, 0],
[207, 0, 0],
[205, 0, 0],
[202, 0, 0],
[200, 0, 0],
[202, 6, 6],
[205, 13, 13],
[207, 20, 20],
[210, 27, 27],
[213, 35, 35],
[215, 43, 43],
[218, 50, 50],
[220, 58, 58],
[223, 66, 66],
[226, 75, 75],
[228, 83, 83],
[231, 92, 92],
[234, 101, 101],
[236, 110, 110],
[239, 119, 119],
[241, 128, 128],
[244, 138, 138],
[247, 147, 147],
[249, 157, 157],
[252, 167, 167],
[255, 178, 178]
]
hottemp = [
[255, 255, 255],
[255, 255, 0],
[255, 0, 0],
[0, 0, 0],
[0, 0, 0]
]
anomaly2 = [
[129, 31, 240],
[124, 30, 240],
[119, 29, 241],
[114, 27, 242],
[108, 26, 242],
[103, 24, 243],
[97, 23, 244],
[91, 21, 245],
[86, 20, 245],
[80, 18, 246],
[73, 17, 247],
[67, 15, 247],
[61, 14, 248],
[55, 12, 249],
[48, 11, 250],
[42, 9, 250],
[35, 7, 251],
[28, 6, 252],
[21, 4, 252],
[14, 3, 253],
[7, 1, 254],
[0, 0, 255],
[3, 6, 253],
[6, 12, 252],
[10, 18, 250],
[13, 24, 249],
[17, 30, 247],
[20, 36, 246],
[23, 42, 245],
[26, 47, 243],
[29, 52, 242],
[33, 57, 240],
[36, 62, 239],
[39, 67, 237],
[42, 72, 236],
[45, 76, 235],
[48, 81, 233],
[51, 85, 232],
[53, 89, 230],
[56, 93, 229],
[59, 97, 227],
[62, 101, 226],
[65, 105, 225],
[63, 106, 226],
[62, 107, 227],
[60, 108, 229],
[59, 110, 230],
[57, 111, 232],
[56, 113, 233],
[54, 115, 235],
[52, 116, 236],
[51, 118, 237],
[49, 120, 239],
[47, 122, 240],
[46, 123, 242],
[44, 125, 243],
[42, 127, 245],
[41, 130, 246],
[39, 132, 247],
[37, 134, 249],
[35, 136, 250],
[33, 139, 252],
[31, 141, 253],
[29, 144, 255],
[28, 145, 255],
[27, 147, 255],
[25, 149, 255],
[24, 151, 255],
[22, 153, 255],
[21, 155, 255],
[19, 158, 255],
[18, 160, 255],
[17, 162, 255],
[15, 164, 255],
[14, 166, 255],
[12, 169, 255],
[11, 171, 255],
[9, 173, 255],
[8, 176, 255],
[7, 178, 255],
[5, 180, 255],
[4, 183, 255],
[2, 185, 255],
[1, 188, 255],
[0, 191, 255],
[7, 191, 254],
[14, 191, 253],
[21, 191, 252],
[28, 191, 251],
[35, 192, 251],
[41, 192, 250],
[48, 193, 249],
[55, 193, 248],
[62, 194, 248],
[69, 194, 247],
[75, 195, 246],
[82, 196, 245],
[88, 196, 245],
[95, 197, 244],
[101, 198, 243],
[108, 199, 242],
[114, 200, 242],
[121, 201, 241],
[127, 202, 240],
[133, 203, 239],
[140, 205, 239],
[143, 206, 239],
[146, 208, 239],
[149, 209, 240],
[152, 211, 240],
[155, 212, 241],
[158, 214, 241],
[161, 215, 242],
[164, 217, 242],
[168, 218, 243],
[171, 220, 243],
[174, 221, 244],
[177, 222, 244],
[180, 224, 245],
[184, 225, 245],
[187, 227, 246],
[190, 228, 246],
[193, 230, 247],
[197, 231, 247],
[200, 233, 248],
[203, 234, 248],
[207, 236, 249],
[255, 255, 255],
[255, 255, 255],
[255, 255, 200],
[255, 254, 192],
[255, 254, 185],
[255, 253, 178],
[255, 252, 171],
[255, 251, 164],
[255, 250, 157],
[255, 249, 149],
[255, 248, 142],
[255, 247, 135],
[255, 246, 128],
[255, 244, 121],
[255, 243, 114],
[255, 241, 107],
[255, 239, 99],
[255, 238, 92],
[255, 236, 85],
[255, 234, 78],
[255, 231, 71],
[255, 229, 64],
[255, 227, 57],
[255, 225, 49],
[255, 222, 47],
[255, 220, 45],
[255, 218, 42],
[255, 215, 40],
[255, 213, 38],
[255, 211, 35],
[255, 208, 33],
[255, 206, 30],
[255, 203, 28],
[255, 201, 26],
[255, 198, 23],
[255, 195, 21],
[255, 193, 19],
[255, 190, 16],
[255, 187, 14],
[255, 184, 11],
[255, 181, 9],
[255, 179, 7],
[255, 176, 4],
[255, 173, 2],
[255, 170, 0],
[255, 167, 0],
[255, 164, 0],
[255, 161, 0],
[255, 158, 0],
[255, 155, 0],
[255, 152, 0],
[255, 149, 0],
[255, 147, 0],
[255, 144, 0],
[255, 141, 0],
[255, 138, 0],
[255, 135, 0],
[255, 132, 0],
[255, 129, 0],
[255, 127, 0],
[255, 124, 0],
[255, 121, 0],
[255, 118, 0],
[255, 115, 0],
[255, 112, 0],
[255, 110, 0],
[255, 104, 0],
[255, 99, 0],
[255, 94, 0],
[255, 89, 0],
[255, 83, 0],
[255, 78, 0],
[255, 73, 0],
[255, 68, 0],
[255, 62, 0],
[255, 57, 0],
[255, 52, 0],
[255, 47, 0],
[255, 41, 0],
[255, 36, 0],
[255, 31, 0],
[255, 26, 0],
[255, 20, 0],
[255, 15, 0],
[255, 10, 0],
[255, 5, 0],
[255, 0, 0],
[252, 0, 0],
[249, 0, 0],
[247, 0, 0],
[244, 0, 0],
[241, 0, 0],
[239, 0, 0],
[236, 0, 0],
[234, 0, 0],
[231, 0, 0],
[228, 0, 0],
[226, 0, 0],
[223, 0, 0],
[220, 0, 0],
[218, 0, 0],
[215, 0, 0],
[213, 0, 0],
[210, 0, 0],
[207, 0, 0],
[205, 0, 0],
[202, 0, 0],
[200, 0, 0],
[200, 6, 6]
]
smap = [
[125, 0, 255],
[0, 0, 255],
[0, 255, 0],
[255, 255, 0],
[255, 125, 0],
[255, 0, 0]
]
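# Sketch (ours -- the original module only defines the tables): color tables
# with few control points, such as grayscale or rainbow above, are typically
# expanded by linear interpolation into a fixed-size lookup table before use.
def expand_table(table, n=256):
    lut = []
    for i in range(n):
        # position of entry i along the control points, in [0, len(table) - 1]
        t = i * (len(table) - 1) / (n - 1)
        lo = int(t)
        hi = min(lo + 1, len(table) - 1)
        f = t - lo
        lut.append([round(table[lo][c] + (table[hi][c] - table[lo][c]) * f)
                    for c in range(3)])
    return lut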
| 19.670909
| 74
| 0.409465
| 1,669
| 10,819
| 2.654284
| 0.179748
| 0.090293
| 0.026411
| 0.024379
| 0.804515
| 0.797743
| 0.797743
| 0.793228
| 0.793228
| 0.793228
| 0
| 0.534864
| 0.347814
| 10,819
| 549
| 75
| 19.70674
| 0.092971
| 0.069322
| 0
| 0.920455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
31f34959f00117c9ae66f7b76ce0b28d6b7863d1
| 7,283
|
py
|
Python
|
block_ciphers.py
|
DatJezdziec/block_ciphers
|
4d7541c592ed6b6cb78d07970a334c9d3417eec9
|
[
"MIT"
] | null | null | null |
block_ciphers.py
|
DatJezdziec/block_ciphers
|
4d7541c592ed6b6cb78d07970a334c9d3417eec9
|
[
"MIT"
] | null | null | null |
block_ciphers.py
|
DatJezdziec/block_ciphers
|
4d7541c592ed6b6cb78d07970a334c9d3417eec9
|
[
"MIT"
] | null | null | null |
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
from base64 import b64encode, b64decode
from hashlib import md5
import os
import time
import json
# Derive a 16-byte AES-128 key from the hard-coded passphrase. A hard-coded
# key is acceptable for this throughput benchmark but unsafe for real use.
key = "1234567890abcdef"
key = md5(key.encode('utf8')).digest()
files_to_encrypt = ['1m.txt', '64m.txt', '128m.txt']
def ctre():
for file_to_encrypt in files_to_encrypt:
input_file = open(file_to_encrypt, "rb")
output_file = open('encrypted' + file_to_encrypt, 'w')
startd = time.time()
buffer_size = os.stat(file_to_encrypt).st_size
data = input_file.read(buffer_size)
cipher = AES.new(key, AES.MODE_CTR)
ct_bytes = cipher.encrypt(data)
nonce = b64encode(cipher.nonce).decode('utf-8')
ct = b64encode(ct_bytes).decode('utf-8')
result = json.dumps({'nonce':nonce, 'ct':ct})
output_file.write(result)
print("CTR encrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def ctrd():
for file_to_encrypt in files_to_encrypt:
input_file = open('encrypted' + file_to_encrypt, 'rb')
output_file = open('decrypted' + file_to_encrypt, 'wb')
startd = time.time()
buffer_size = os.stat('encrypted' + file_to_encrypt).st_size
json_input = input_file.read(buffer_size)
b64 = json.loads(json_input)
nonce = b64decode(b64['nonce'])
ct = b64decode(b64['ct'])
cipher = AES.new(key, AES.MODE_CTR, nonce=nonce)
pt = cipher.decrypt(ct)
output_file.write(pt)
print("CTR decrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def cbce():
for file_to_encrypt in files_to_encrypt:
input_file = open(file_to_encrypt, "rb")
output_file = open('encrypted' + file_to_encrypt, 'w')
startd = time.time()
buffer_size = os.stat(file_to_encrypt).st_size
data = input_file.read(buffer_size)
cipher = AES.new(key, AES.MODE_CBC)
ct_bytes = cipher.encrypt(pad(data, AES.block_size))
iv = b64encode(cipher.iv).decode('utf-8')
ct = b64encode(ct_bytes).decode('utf-8')
result = json.dumps({'iv':iv, 'ct':ct})
output_file.write(result)
print("CBC encrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def cbcd():
for file_to_encrypt in files_to_encrypt:
input_file = open('encrypted' + file_to_encrypt, 'rb')
output_file = open('decrypted' + file_to_encrypt, 'wb')
startd = time.time()
buffer_size = os.stat('encrypted' + file_to_encrypt).st_size
json_input = input_file.read(buffer_size)
b64 = json.loads(json_input)
iv = b64decode(b64['iv'])
ct = b64decode(b64['ct'])
cipher = AES.new(key, AES.MODE_CBC, iv)
pt = unpad(cipher.decrypt(ct), AES.block_size)
output_file.write(pt)
print("CBC decrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def cfbe():
for file_to_encrypt in files_to_encrypt:
input_file = open(file_to_encrypt, "rb")
output_file = open('encrypted' + file_to_encrypt, 'w')
startd = time.time()
buffer_size = os.stat(file_to_encrypt).st_size
data = input_file.read(buffer_size)
cipher = AES.new(key, AES.MODE_CFB)
ct_bytes = cipher.encrypt(data)
iv = b64encode(cipher.iv).decode('utf-8')
ct = b64encode(ct_bytes).decode('utf-8')
result = json.dumps({'iv': iv, 'ciphertext': ct})
output_file.write(result)
print("CFB encrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def cfbd():
for file_to_encrypt in files_to_encrypt:
input_file = open('encrypted' + file_to_encrypt, 'rb')
output_file = open('decrypted' + file_to_encrypt, 'wb')
startd = time.time()
buffer_size = os.stat('encrypted' + file_to_encrypt).st_size
json_input = input_file.read(buffer_size)
b64 = json.loads(json_input)
iv = b64decode(b64['iv'])
ct = b64decode(b64['ciphertext'])
cipher = AES.new(key, AES.MODE_CFB, iv=iv)
pt = cipher.decrypt(ct)
output_file.write(pt)
print("CFB decrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def ofbe():
for file_to_encrypt in files_to_encrypt:
input_file = open(file_to_encrypt, "rb")
output_file = open('encrypted' + file_to_encrypt, 'w')
startd = time.time()
buffer_size = os.stat(file_to_encrypt).st_size
data = input_file.read(buffer_size)
cipher = AES.new(key, AES.MODE_OFB)
ct_bytes = cipher.encrypt(data)
iv = b64encode(cipher.iv).decode('utf-8')
ct = b64encode(ct_bytes).decode('utf-8')
result = json.dumps({'iv': iv, 'ciphertext': ct})
output_file.write(result)
print("OFB encrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def ofbd():
for file_to_encrypt in files_to_encrypt:
input_file = open('encrypted' + file_to_encrypt, 'rb')
output_file = open('decrypted' + file_to_encrypt, 'wb')
startd = time.time()
buffer_size = os.stat('encrypted' + file_to_encrypt).st_size
json_input = input_file.read(buffer_size)
b64 = json.loads(json_input)
iv = b64decode(b64['iv'])
ct = b64decode(b64['ciphertext'])
cipher = AES.new(key, AES.MODE_OFB, iv=iv)
pt = cipher.decrypt(ct)
output_file.write(pt)
print("OFB decrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def ecbe():
for file_to_encrypt in files_to_encrypt:
input_file = open(file_to_encrypt, "rb")
output_file = open('encrypted' + file_to_encrypt, 'w')
startd = time.time()
buffer_size = os.stat(file_to_encrypt).st_size
data = input_file.read(buffer_size)
cipher = AES.new(key, AES.MODE_ECB)
ct_bytes = cipher.encrypt(pad(data, AES.block_size))
ct = b64encode(ct_bytes).decode('utf-8')
result = json.dumps({'ct':ct})
output_file.write(result)
print("ECB encrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
def ecbd():
for file_to_encrypt in files_to_encrypt:
input_file = open('encrypted' + file_to_encrypt, 'rb')
output_file = open('decrypted' + file_to_encrypt, 'wb')
startd = time.time()
buffer_size = os.stat('encrypted' + file_to_encrypt).st_size
json_input = input_file.read(buffer_size)
b64 = json.loads(json_input)
ct = b64decode(b64['ct'])
cipher = AES.new(key, AES.MODE_ECB)
pt = unpad(cipher.decrypt(ct), AES.block_size)
output_file.write(pt)
print("ECB decrypt time " + file_to_encrypt)
endd = time.time()
print(endd - startd)
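# Sketch (an assumption -- the original file defines no entry point): run each
# mode's encrypt/decrypt pair in turn to compare the timings they print.
if __name__ == '__main__':
    for encrypt_fn, decrypt_fn in [(ctre, ctrd), (cbce, cbcd), (cfbe, cfbd),
                                   (ofbe, ofbd), (ecbe, ecbd)]:
        encrypt_fn()
        decrypt_fn()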
| 40.016484
| 69
| 0.610463
| 970
| 7,283
| 4.348454
| 0.091753
| 0.130156
| 0.154101
| 0.078236
| 0.892129
| 0.886439
| 0.886439
| 0.85799
| 0.85799
| 0.830251
| 0
| 0.02024
| 0.267335
| 7,283
| 181
| 70
| 40.237569
| 0.77024
| 0
| 0
| 0.726744
| 0
| 0
| 0.078158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05814
| false
| 0.005814
| 0.05814
| 0
| 0.116279
| 0.116279
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ee3ef671726c79b4b6d16f15e24083cf16a57f07
| 30,140
|
py
|
Python
|
server/models/inception_resnet/inception_resnet.py
|
Mobile-and-Ubiquitous-Computing-2020-1/team1
|
a3a5b4916a012ee0cd98cb186046a1957872b550
|
[
"MIT"
] | 3
|
2020-03-23T10:32:43.000Z
|
2020-06-25T03:36:06.000Z
|
server/models/inception_resnet/inception_resnet.py
|
Mobile-and-Ubiquitous-Computing-2020-1/team1
|
a3a5b4916a012ee0cd98cb186046a1957872b550
|
[
"MIT"
] | 4
|
2020-05-11T13:50:00.000Z
|
2022-02-10T01:58:08.000Z
|
server/models/inception_resnet/inception_resnet.py
|
Mobile-and-Ubiquitous-Computing-2020-1/team1
|
a3a5b4916a012ee0cd98cb186046a1957872b550
|
[
"MIT"
] | 1
|
2020-08-13T00:01:01.000Z
|
2020-08-13T00:01:01.000Z
|
"""
Model code for FaceNet
updated version (compatible with TF 2.x) of
https://github.com/davidsandberg/facenet/blob/master/src/models/inception_resnet_v1.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import layers
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
class CenterLoss(layers.Layer):
"""center loss calculation (this is stateful)"""
def __init__(self, num_classes, embed_dim=512):
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.embed_dim = embed_dim
def build(self, input_shape):
self.center_var = self.add_weight('center_var',
shape=(self.num_classes, self.embed_dim),
dtype=tf.float32,
initializer=initializers.zeros,
trainable=False)
self.built = True
def call(self, features, labels):
labels = tf.reshape(labels, [-1])
centers_batch = tf.gather(self.center_var, labels)
diff = (1 - 0.95) * (centers_batch - features)
        # scatter_nd_sub needs indices shaped [batch, 1] for a rank-2 variable
        with tf.control_dependencies(
                [self.center_var.scatter_nd_sub(tf.expand_dims(labels, 1), diff)]):
loss = tf.reduce_mean(tf.square(features - centers_batch))
return loss
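# Illustrative usage (ours, not from the original file): the layer keeps a
# running per-class center and returns the mean squared distance between the
# batch features and their class centers.
#
#   center_loss = CenterLoss(num_classes=8631, embed_dim=512)
#   loss = center_loss(prelogits, labels)  # labels: int tensor, shape [batch]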
class BaseConvBlock(keras.Model):
"""Base Convolution Module"""
def __init__(self,
output_channels,
kernel_size,
strides=(1, 1),
padding='valid',
weight_decay=5e-4,
kernel_initializer=initializers.glorot_uniform,
batch_norm_decay=0.995,
batch_norm_epsilon=0.001,
name=None):
super(BaseConvBlock, self).__init__()
self.conv = layers.Conv2D(filters=output_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=regularizers.l2(weight_decay) \
if weight_decay > 0 else None,
use_bias=False,
name=name)
self.norm = layers.BatchNormalization(axis=-1,
momentum=batch_norm_decay,
epsilon=batch_norm_epsilon,
name=name)
self.activation = layers.ReLU()
def call(self, x, training=False):
x = self.conv(x)
x = self.norm(x, training=training)
x = self.activation(x)
return x
class Block35(keras.Model):
"""Block35 module"""
def __init__(self, filters, scale=1.0, activation_fn=tf.nn.relu):
super(Block35, self).__init__()
# branch 0
self.tower_conv = BaseConvBlock(32, (1, 1),
padding='same',
name='Conv2d_1x1')
# branch 1
self.tower_conv1_0 = BaseConvBlock(32, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv1_1 = BaseConvBlock(32, (3, 3),
padding='same',
name='Conv2d_0b_3x3')
# branch 2
self.tower_conv2_0 = BaseConvBlock(32, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv2_1 = BaseConvBlock(32, (3, 3),
padding='same',
name='Conv2d_0b_3x3')
self.tower_conv2_2 = BaseConvBlock(32, (3, 3),
padding='same',
name='Conv2d_0c_3x3')
# filters 256
self.up_conv = layers.Conv2D(filters, (1, 1), name='Conv2d_1x1')
self.scale = scale
self.activation_fn = activation_fn
def call(self, x, training=False):
inputs = x
branch1 = self.tower_conv(x, training=training)
branch2 = self.tower_conv1_0(x, training=training)
branch2 = self.tower_conv1_1(branch2, training=training)
branch3 = self.tower_conv2_0(x, training=training)
branch3 = self.tower_conv2_1(branch3, training=training)
branch3 = self.tower_conv2_2(branch3, training=training)
mixed = tf.concat([branch1, branch2, branch3], axis=3) # 32 * 3 == 96
x = self.up_conv(mixed, training=training) # 96 => 256
x = inputs + self.scale * x
if self.activation_fn is not None:
x = self.activation_fn(x)
return x
class Block17(keras.Model):
def __init__(self, filters, scale=1.0, activation_fn=tf.nn.relu):
super(Block17, self).__init__()
# branch 0
self.tower_conv = BaseConvBlock(128, (1, 1),
padding='same',
name='Conv2d_1x1')
# branch 1
self.tower_conv1_0 = BaseConvBlock(128, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv1_1 = BaseConvBlock(128, (1, 7),
padding='same',
name='Conv2d_0b_1x7')
self.tower_conv1_2 = BaseConvBlock(128, (7, 1),
padding='same',
name='Conv2d_0c_7x1')
self.up_conv = layers.Conv2D(filters, (1, 1), name='Conv2d_1x1')
self.scale = scale
self.activation_fn = activation_fn
def call(self, x, training=False):
inputs = x
branch1 = self.tower_conv(x, training=training)
branch2 = self.tower_conv1_0(x, training=training)
branch2 = self.tower_conv1_1(branch2, training=training)
branch2 = self.tower_conv1_2(branch2, training=training)
mixed = tf.concat([branch1, branch2], axis=3)
x = self.up_conv(mixed, training=training)
x = inputs + self.scale * x
if self.activation_fn is not None:
x = self.activation_fn(x)
return x
class Block8(keras.Model):
def __init__(self, filters, scale=1.0, activation_fn=tf.nn.relu):
super(Block8, self).__init__()
# branch 0
self.tower_conv = BaseConvBlock(192, (1, 1),
padding='same',
name='Conv2d_1x1')
# branch 1
self.tower_conv1_0 = BaseConvBlock(192, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv1_1 = BaseConvBlock(192, (1, 3),
padding='same',
name='Conv2d_0b_1x3')
self.tower_conv1_2 = BaseConvBlock(192, (3, 1),
padding='same',
name='Conv2d_0c_3x1')
self.up_conv = layers.Conv2D(filters, (1, 1), name='Conv2d_1x1')
self.scale = scale
self.activation_fn = activation_fn
def call(self, x, training=False):
inputs = x
branch1 = self.tower_conv(x, training=training)
branch2 = self.tower_conv1_0(x, training=training)
branch2 = self.tower_conv1_1(branch2, training=training)
branch2 = self.tower_conv1_2(branch2, training=training)
mixed = tf.concat([branch1, branch2], axis=3)
x = self.up_conv(mixed, training=training)
x = inputs + self.scale * x
if self.activation_fn is not None:
x = self.activation_fn(x)
return x
class ReductionA(keras.Model):
def __init__(self, k, l, m, n):
super(ReductionA, self).__init__()
# branch 0
self.tower_conv = BaseConvBlock(n, (3, 3), (2, 2),
padding='valid',
name='Conv2d_1a_3x3')
# branch 1
self.tower_conv1_0 = BaseConvBlock(k, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv1_1 = BaseConvBlock(l, (3, 3),
padding='same',
name='Conv2d_0b_3x3')
self.tower_conv1_2 = BaseConvBlock(m, (3, 3), (2, 2),
padding='valid',
name='Conv2d_1a_3x3')
# branch 2
self.tower_pool = layers.MaxPooling2D((3, 3), (2, 2), padding='valid',
name='MaxPool_1a_3x3')
def call(self, x, training=False):
branch0 = self.tower_conv(x, training=training) # n
branch1 = self.tower_conv1_0(x, training=training)
branch1 = self.tower_conv1_1(branch1, training=training)
branch1 = self.tower_conv1_2(branch1, training=training) # l
branch2 = self.tower_pool(x) # x
x = tf.concat([branch0, branch1, branch2], axis=3) # n + l + x
return x
class ReductionB(keras.Model):
def __init__(self):
super(ReductionB, self).__init__()
# branch 0
self.tower_conv = BaseConvBlock(256, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv_1 = BaseConvBlock(384, (3, 3),
strides=(2, 2),
padding='valid',
name='Conv2d_1a_3x3')
# branch 1
self.tower_conv1 = BaseConvBlock(256, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv1_1 = BaseConvBlock(256, (3, 3),
strides=(2, 2),
padding='valid',
name='Conv2d_1a_3x3')
# branch 2
self.tower_conv2 = BaseConvBlock(256, (1, 1),
padding='same',
name='Conv2d_0a_1x1')
self.tower_conv2_1 = BaseConvBlock(256, (3, 3),
padding='same',
name='Conv2d_0b_3x3')
self.tower_conv2_2 = BaseConvBlock(256, (3, 3),
strides=(2, 2),
padding='valid',
name='Conv2d_1a_3x3')
self.tower_pool = layers.MaxPooling2D((3, 3), strides=(2, 2),
padding='valid',
name='MaxPool_1a_3x3')
def call(self, x, training=False):
branch0 = self.tower_conv(x, training=training)
branch0 = self.tower_conv_1(branch0, training=training)
branch1 = self.tower_conv1(x, training=training)
branch1 = self.tower_conv1_1(branch1, training=training)
branch2 = self.tower_conv2(x, training=training)
branch2 = self.tower_conv2_1(branch2, training=training)
branch2 = self.tower_conv2_2(branch2, training=training)
branch3 = self.tower_pool(x)
x = tf.concat([branch0, branch1, branch2, branch3], axis=3)
return x
class InceptionResNetV1(keras.Model):
def __init__(self,
dropout_keep_prob=0.4,
bottleneck_layer_size=512,
use_center_loss=False,
num_classes=8631):
super(InceptionResNetV1, self).__init__()
self.conv1 = BaseConvBlock(32, (3, 3),
strides=(2, 2),
padding='valid',
name='Conv2d_1a_3x3')
self.conv2 = BaseConvBlock(32, (3, 3),
padding='valid',
name='Conv2d_2a_3x3')
self.conv3 = BaseConvBlock(64, (3, 3),
padding='same',
name='Conv2d_2b_3x3')
self.pool = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid',
name='MaxPool_3a_3x3')
self.conv4 = BaseConvBlock(80, (1, 1),
padding='valid',
name='Conv2d_3b_1x1')
self.conv5 = BaseConvBlock(192, (3, 3),
padding='valid',
name='Conv2d_4a_3x3')
self.conv6 = BaseConvBlock(256, (3, 3),
strides=(2, 2),
padding='valid',
name='Conv2d_4b_3x3')
self.block35 = [Block35(256, scale=0.17) for _ in range(5)]
self.reduction_a = ReductionA(192, 192, 256, 384) # 256 + 256 + 384
self.block17 = [Block17(256 + 256 + 384, scale=0.10) for _ in range(10)]
self.reduction_b = ReductionB()
self.block8 = [Block8(1792, scale=0.20, activation_fn=tf.nn.relu \
if i < 5 else None) for i in range(6)]
self.avg_pool = layers.GlobalAveragePooling2D(name='AvgPool_1a_global')
self.flatten = layers.Flatten()
self.dropout = layers.Dropout(1 - dropout_keep_prob)
self.embedding = layers.Dense(bottleneck_layer_size, name='Bottleneck',
use_bias=False)
self.last_bn = layers.BatchNormalization()
# pylint: disable=line-too-long
self.classifier = layers.Dense(num_classes,
kernel_initializer=initializers.glorot_uniform,
kernel_regularizer=regularizers.l2(5e-4),
name='Logits')
self.activation = layers.Activation('softmax')
self.use_center_loss = use_center_loss
if use_center_loss:
self.center_loss = CenterLoss(num_classes, 512)
def build(self, input_shape):
if self.use_center_loss:
self.center_loss.build(input_shape)
super(InceptionResNetV1, self).build(input_shape)
def calculate_embedding(self, prelogits):
# https://github.com/tamerthamoqa/facenet-pytorch-vggface2/blob/master/models/resnet.py
x = tf.nn.l2_normalize(prelogits, axis=1, epsilon=1e-10)
x = x * 10.
return x
def calculate_center_loss(self, features, labels):
assert self.use_center_loss
return self.center_loss(features, labels)
def call(self, x, training=False):
if len(x.shape) == 4:
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
x = self.pool(x, training=training)
x = self.conv4(x, training=training)
x = self.conv5(x, training=training)
x = self.conv6(x, training=training)
for block in self.block35:
x = block(x, training=training)
x = self.reduction_a(x, training=training)
for block in self.block17:
x = block(x, training=training)
x = self.reduction_b(x, training=training)
for block in self.block8:
x = block(x, training=training)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.dropout(x, training=training)
prelogits = self.embedding(x)
prelogits = self.last_bn(prelogits, training=training)
else:
assert len(x.shape) == 2
prelogits = x
x = self.calculate_embedding(prelogits)
x = self.classifier(x)
x = self.activation(x)
return x, prelogits
def feature_extract(self, x):
"""just feature extraction without training flag"""
x = self.conv1(x, training=False)
x = self.conv2(x, training=False)
x = self.conv3(x, training=False)
x = self.pool(x, training=False)
x = self.conv4(x, training=False)
x = self.conv5(x, training=False)
x = self.conv6(x, training=False)
for block in self.block35:
x = block(x, training=False)
x = self.reduction_a(x, training=False)
for block in self.block17:
x = block(x, training=False)
x = self.reduction_b(x, training=False)
for block in self.block8:
x = block(x, training=False)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.dropout(x, training=False)
prelogits = self.embedding(x)
prelogits = self.last_bn(prelogits, training=False)
return prelogits
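# Sketch (the 160x160 input size is an assumption, as commonly used in
# FaceNet-style pipelines): building the full model and running one forward
# pass on a batch of RGB face crops.
#
#   model = InceptionResNetV1(num_classes=8631)
#   probs, prelogits = model(tf.zeros([1, 160, 160, 3]), training=False)
#   embeddings = model.feature_extract(tf.zeros([1, 160, 160, 3]))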
class ThawedModel1(keras.Model):
"""
assume only first three layers are fixed
input size should be 38 x 38 x 64
"""
def __init__(self,
dropout_keep_prob=0.4,
bottleneck_layer_size=512,
use_center_loss=False,
num_classes=8631):
super(ThawedModel1, self).__init__()
self.conv4 = BaseConvBlock(80, (1, 1),
padding='valid',
name='Conv2d_3b_1x1')
self.conv5 = BaseConvBlock(192, (3, 3),
padding='valid',
name='Conv2d_4a_3x3')
self.conv6 = BaseConvBlock(256, (3, 3),
strides=(2, 2),
padding='valid',
name='Conv2d_4b_3x3')
self.block35 = [Block35(256, scale=0.17) for _ in range(5)]
self.reduction_a = ReductionA(192, 192, 256, 384) # 256 + 256 + 384
self.block17 = [Block17(256 + 256 + 384, scale=0.10) for _ in range(10)]
self.reduction_b = ReductionB()
self.block8 = [Block8(1792, scale=0.20, activation_fn=tf.nn.relu \
if i < 5 else None) for i in range(6)]
self.avg_pool = layers.GlobalAveragePooling2D(name='AvgPool_1a_global')
self.flatten = layers.Flatten()
self.dropout = layers.Dropout(1 - dropout_keep_prob)
self.embedding = layers.Dense(bottleneck_layer_size, name='Bottleneck',
use_bias=False)
self.last_bn = layers.BatchNormalization()
# pylint: disable=line-too-long
self.classifier = layers.Dense(num_classes,
kernel_initializer=initializers.glorot_uniform,
kernel_regularizer=regularizers.l2(5e-4),
name='Logits')
self.activation = layers.Activation('softmax')
self.use_center_loss = use_center_loss
if use_center_loss:
self.center_loss = CenterLoss(num_classes, bottleneck_layer_size)  # feature dim must match the bottleneck embedding
def build(self, input_shape):
if self.use_center_loss:
self.center_loss.build(input_shape)
super(ThawedModel1, self).build(input_shape)
def calculate_embedding(self, prelogits):
# https://github.com/tamerthamoqa/facenet-pytorch-vggface2/blob/master/models/resnet.py
x = tf.nn.l2_normalize(prelogits, axis=1, epsilon=1e-10)
x = x * 10.
return x
def calculate_center_loss(self, features, labels):
assert self.use_center_loss
return self.center_loss(features, labels)
def call(self, x, training=False):
x = self.conv4(x, training=training)
x = self.conv5(x, training=training)
x = self.conv6(x, training=training)
for block in self.block35:
x = block(x, training=training)
x = self.reduction_a(x, training=training)
for block in self.block17:
x = block(x, training=training)
x = self.reduction_b(x, training=training)
for block in self.block8:
x = block(x, training=training)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.dropout(x, training=training)
prelogits = self.embedding(x)
prelogits = self.last_bn(prelogits, training=training)
x = self.calculate_embedding(prelogits)
x = self.classifier(x)
x = self.activation(x)
return x, prelogits
class ThawedModel2(keras.Model):
"""
input size should be 17 x 17 x 256
"""
def __init__(self,
dropout_keep_prob=0.4,
bottleneck_layer_size=512,
use_center_loss=False,
num_classes=8631):
super(ThawedModel2, self).__init__()
self.block35 = [Block35(256, scale=0.17) for _ in range(5)]
self.reduction_a = ReductionA(192, 192, 256, 384) # 256 + 256 + 384
self.block17 = [Block17(256 + 256 + 384, scale=0.10) for _ in range(10)]
self.reduction_b = ReductionB()
self.block8 = [Block8(1792, scale=0.20, activation_fn=tf.nn.relu \
if i < 5 else None) for i in range(6)]
self.avg_pool = layers.GlobalAveragePooling2D(name='AvgPool_1a_global')
self.flatten = layers.Flatten()
self.dropout = layers.Dropout(1 - dropout_keep_prob)
self.embedding = layers.Dense(bottleneck_layer_size, name='Bottleneck',
use_bias=False)
self.last_bn = layers.BatchNormalization()
# pylint: disable=line-too-long
self.classifier = layers.Dense(num_classes,
kernel_initializer=initializers.glorot_uniform,
kernel_regularizer=regularizers.l2(5e-4),
name='Logits')
self.activation = layers.Activation('softmax')
self.use_center_loss = use_center_loss
if use_center_loss:
self.center_loss = CenterLoss(num_classes, bottleneck_layer_size)  # feature dim must match the bottleneck embedding
def build(self, input_shape):
if self.use_center_loss:
self.center_loss.build(input_shape)
super(ThawedModel2, self).build(input_shape)
def calculate_embedding(self, prelogits):
# https://github.com/tamerthamoqa/facenet-pytorch-vggface2/blob/master/models/resnet.py
x = tf.nn.l2_normalize(prelogits, axis=1, epsilon=1e-10)
x = x * 10.
return x
def calculate_center_loss(self, features, labels):
assert self.use_center_loss
return self.center_loss(features, labels)
def call(self, x, training=False):
for block in self.block35:
x = block(x, training=training)
x = self.reduction_a(x, training=training)
for block in self.block17:
x = block(x, training=training)
x = self.reduction_b(x, training=training)
for block in self.block8:
x = block(x, training=training)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.dropout(x, training=training)
prelogits = self.embedding(x)
prelogits = self.last_bn(prelogits, training=training)
x = self.calculate_embedding(prelogits)
x = self.classifier(x)
x = self.activation(x)
return x, prelogits
class ThawedModel3(keras.Model):
"""
input size should be 17 x 17 x 256
"""
def __init__(self,
dropout_keep_prob=0.4,
bottleneck_layer_size=512,
use_center_loss=False,
num_classes=8631):
super(ThawedModel3, self).__init__()
self.reduction_a = ReductionA(192, 192, 256, 384) # 256 + 256 + 384
self.block17 = [Block17(256 + 256 + 384, scale=0.10) for _ in range(10)]
self.reduction_b = ReductionB()
self.block8 = [Block8(1792, scale=0.20, activation_fn=tf.nn.relu \
if i < 5 else None) for i in range(6)]
self.avg_pool = layers.GlobalAveragePooling2D(name='AvgPool_1a_global')
self.flatten = layers.Flatten()
self.dropout = layers.Dropout(1 - dropout_keep_prob)
self.embedding = layers.Dense(bottleneck_layer_size, name='Bottleneck',
use_bias=False)
self.last_bn = layers.BatchNormalization()
# pylint: disable=line-too-long
self.classifier = layers.Dense(num_classes,
kernel_initializer=initializers.glorot_uniform,
kernel_regularizer=regularizers.l2(5e-4),
name='Logits')
self.activation = layers.Activation('softmax')
self.use_center_loss = use_center_loss
if use_center_loss:
self.center_loss = CenterLoss(num_classes, bottleneck_layer_size)  # feature dim must match the bottleneck embedding
def build(self, input_shape):
if self.use_center_loss:
self.center_loss.build(input_shape)
super(ThawedModel3, self).build(input_shape)
def calculate_embedding(self, prelogits):
# https://github.com/tamerthamoqa/facenet-pytorch-vggface2/blob/master/models/resnet.py
x = tf.nn.l2_normalize(prelogits, axis=1, epsilon=1e-10)
x = x * 10.
return x
def calculate_center_loss(self, features, labels):
assert self.use_center_loss
return self.center_loss(features, labels)
def call(self, x, training=False):
x = self.reduction_a(x, training=training)
for block in self.block17:
x = block(x, training=training)
x = self.reduction_b(x, training=training)
for block in self.block8:
x = block(x, training=training)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.dropout(x, training=training)
prelogits = self.embedding(x)
prelogits = self.last_bn(prelogits, training=training)
x = self.calculate_embedding(prelogits)
x = self.classifier(x)
x = self.activation(x)
return x, prelogits
class ThawedModel4(keras.Model):
"""
input size should be 8 x 8 x 896
"""
def __init__(self,
dropout_keep_prob=0.4,
bottleneck_layer_size=512,
use_center_loss=False,
num_classes=8631):
super(ThawedModel4, self).__init__()
self.reduction_b = ReductionB()
self.block8 = [Block8(1792, scale=0.20, activation_fn=tf.nn.relu \
if i < 5 else None) for i in range(6)]
self.avg_pool = layers.GlobalAveragePooling2D(name='AvgPool_1a_global')
self.flatten = layers.Flatten()
self.dropout = layers.Dropout(1 - dropout_keep_prob)
self.embedding = layers.Dense(bottleneck_layer_size, name='Bottleneck',
use_bias=False)
self.last_bn = layers.BatchNormalization()
# pylint: disable=line-too-long
self.classifier = layers.Dense(num_classes,
kernel_initializer=initializers.glorot_uniform,
kernel_regularizer=regularizers.l2(5e-4),
name='Logits')
self.activation = layers.Activation('softmax')
self.use_center_loss = use_center_loss
if use_center_loss:
self.center_loss = CenterLoss(num_classes, bottleneck_layer_size)  # feature dim must match the bottleneck embedding
def build(self, input_shape):
if self.use_center_loss:
self.center_loss.build(input_shape)
super(ThawedModel4, self).build(input_shape)
def calculate_embedding(self, prelogits):
# https://github.com/tamerthamoqa/facenet-pytorch-vggface2/blob/master/models/resnet.py
x = tf.nn.l2_normalize(prelogits, axis=1, epsilon=1e-10)
x = x * 10.
return x
def calculate_center_loss(self, features, labels):
assert self.use_center_loss
return self.center_loss(features, labels)
def call(self, x, training=False):
x = self.reduction_b(x, training=training)
for block in self.block8:
x = block(x, training=training)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.dropout(x, training=training)
prelogits = self.embedding(x)
prelogits = self.last_bn(prelogits, training=training)
x = self.calculate_embedding(prelogits)
x = self.classifier(x)
x = self.activation(x)
return x, prelogits
class ThawedModel5(keras.Model):
"""
input size should be 1792
"""
def __init__(self,
dropout_keep_prob=0.4,
bottleneck_layer_size=512,
use_center_loss=False,
num_classes=8631):
super(ThawedModel5, self).__init__()
self.embedding = layers.Dense(bottleneck_layer_size, name='Bottleneck',
use_bias=False)
self.last_bn = layers.BatchNormalization()
# pylint: disable=line-too-long
self.classifier = layers.Dense(num_classes,
kernel_initializer=initializers.glorot_uniform,
kernel_regularizer=regularizers.l2(5e-4),
name='Logits')
self.activation = layers.Activation('softmax')
self.use_center_loss = use_center_loss
if use_center_loss:
self.center_loss = CenterLoss(num_classes, bottleneck_layer_size)  # feature dim must match the bottleneck embedding
def build(self, input_shape):
if self.use_center_loss:
self.center_loss.build(input_shape)
super(ThawedModel5, self).build(input_shape)
def calculate_embedding(self, prelogits):
# https://github.com/tamerthamoqa/facenet-pytorch-vggface2/blob/master/models/resnet.py
x = tf.nn.l2_normalize(prelogits, axis=1, epsilon=1e-10)
x = x * 10.
return x
def calculate_center_loss(self, features, labels):
assert self.use_center_loss
return self.center_loss(features, labels)
def call(self, x, training=False):
prelogits = self.embedding(x)
prelogits = self.last_bn(prelogits, training=training)
x = self.calculate_embedding(prelogits)
x = self.classifier(x)
x = self.activation(x)
return x, prelogits
class ThawedModel6(keras.Model):
"""
input size should be 512
"""
def __init__(self,
dropout_keep_prob=0.4,
bottleneck_layer_size=512,
use_center_loss=False,
num_classes=8631):
super(ThawedModel6, self).__init__()
self.classifier = layers.Dense(num_classes,
kernel_initializer=initializers.glorot_uniform,
kernel_regularizer=regularizers.l2(5e-4),
name='Logits')
self.activation = layers.Activation('softmax')
self.use_center_loss = use_center_loss
if use_center_loss:
self.center_loss = CenterLoss(num_classes, bottleneck_layer_size)  # feature dim must match the input embedding
def build(self, input_shape):
if self.use_center_loss:
self.center_loss.build(input_shape)
super(ThawedModel6, self).build(input_shape)
def calculate_embedding(self, prelogits):
# https://github.com/tamerthamoqa/facenet-pytorch-vggface2/blob/master/models/resnet.py
x = tf.nn.l2_normalize(prelogits, axis=1, epsilon=1e-10)
x = x * 10.
return x
def calculate_center_loss(self, features, labels):
assert self.use_center_loss
return self.center_loss(features, labels)
def call(self, x, training=False):
prelogits = x
x = self.calculate_embedding(prelogits)
x = self.classifier(x)
x = self.activation(x)
return x, prelogits
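# --- Usage sketch (added for illustration; not part of the original file) ---
# The ThawedModelN variants implement staged fine-tuning: a frozen prefix of
# InceptionResNetV1 computes intermediate features once, and the matching
# thawed head trains the remaining layers. A minimal sketch for ThawedModel4,
# whose docstring expects 8 x 8 x 896 features; the batch size is illustrative
# and the building blocks (ReductionB, Block8, ...) are assumed to be defined
# earlier in this module:
def _thawed_model_usage_sketch():
    head = ThawedModel4(num_classes=8631)
    feats = tf.zeros((2, 8, 8, 896))  # stand-in for frozen-prefix features
    probs, prelogits = head(feats, training=True)
    # probs: (2, 8631) softmax scores; prelogits: (2, 512) embeddings
    return probs, prelogits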
| 36.400966
| 91
| 0.587459
| 3,586
| 30,140
| 4.747351
| 0.071389
| 0.022321
| 0.048931
| 0.032072
| 0.869361
| 0.830768
| 0.801104
| 0.782072
| 0.764391
| 0.756814
| 0
| 0.050983
| 0.306271
| 30,140
| 827
| 92
| 36.444982
| 0.763212
| 0.05063
| 0
| 0.749206
| 0
| 0
| 0.032301
| 0
| 0
| 0
| 0
| 0
| 0.012698
| 1
| 0.080952
| false
| 0
| 0.014286
| 0
| 0.163492
| 0.001587
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e7f113ed922d0ca448a810468e43513bff7ff12
| 6,790
|
py
|
Python
|
dfirtrack_main/tests/os/test_os_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | 4
|
2020-03-06T17:37:09.000Z
|
2020-03-17T07:50:55.000Z
|
dfirtrack_main/tests/os/test_os_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | null | null | null |
dfirtrack_main/tests/os/test_os_views.py
|
blackhatethicalhacking/dfirtrack
|
9c2e13015291f2981d14d63c9683e7c447e91f3a
|
[
"MIT"
] | 1
|
2020-03-06T20:54:52.000Z
|
2020-03-06T20:54:52.000Z
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Os
import urllib.parse
class OsViewTestCase(TestCase):
""" os view tests """
@classmethod
def setUpTestData(cls):
# create object
Os.objects.create(os_name='os_1')
# create user
test_user = User.objects.create_user(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
def test_oss_list_not_logged_in(self):
""" test list view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/oss/', safe='')
# get response
response = self.client.get('/oss/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_oss_list_logged_in(self):
""" test list view """
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/')
# compare
self.assertEqual(response.status_code, 200)
def test_oss_list_template(self):
""" test list view """
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/os/oss_list.html')
def test_oss_list_get_user_context(self):
""" test list view """
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_os')
def test_oss_detail_not_logged_in(self):
""" test detail view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# create url
destination = '/login/?next=' + urllib.parse.quote('/oss/' + str(os_1.os_id), safe='')
# get response
response = self.client.get('/oss/' + str(os_1.os_id), follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_oss_detail_logged_in(self):
""" test detail view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/' + str(os_1.os_id))
# compare
self.assertEqual(response.status_code, 200)
def test_oss_detail_template(self):
""" test detail view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/' + str(os_1.os_id))
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/os/oss_detail.html')
def test_oss_detail_get_user_context(self):
""" test detail view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/' + str(os_1.os_id))
# compare
self.assertEqual(str(response.context['user']), 'testuser_os')
def test_oss_add_not_logged_in(self):
""" test add view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/oss/add/', safe='')
# get response
response = self.client.get('/oss/add/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_oss_add_logged_in(self):
""" test add view """
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/add/')
# compare
self.assertEqual(response.status_code, 200)
def test_oss_add_template(self):
""" test add view """
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/add/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/os/oss_add.html')
def test_oss_add_get_user_context(self):
""" test add view """
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/add/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_os')
def test_oss_edit_not_logged_in(self):
""" test edit view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# create url
destination = '/login/?next=' + urllib.parse.quote('/oss/' + str(os_1.os_id) + '/edit/', safe='')
# get response
response = self.client.get('/oss/' + str(os_1.os_id) + '/edit/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_oss_edit_logged_in(self):
""" test edit view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/' + str(os_1.os_id) + '/edit/')
# compare
self.assertEqual(response.status_code, 200)
def test_oss_edit_template(self):
""" test edit view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/' + str(os_1.os_id) + '/edit/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/os/oss_edit.html')
def test_oss_edit_get_user_context(self):
""" test edit view """
# get object
os_1 = Os.objects.get(os_name='os_1')
# login testuser
login = self.client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')
# get response
response = self.client.get('/oss/' + str(os_1.os_id) + '/edit/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_os')
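# --- Refactoring sketch (added for illustration; not from the original tests) ---
# Every logged-in test above repeats the same self.client.login(...) call; a
# minimal sketch of how that boilerplate could be shared, reusing the
# credentials created in setUpTestData:
def _login_testuser(client):
    """Log the shared test user in; returns True on success."""
    return client.login(username='testuser_os', password='n7hIWBsrGsG0n4mSjbfw')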
| 35.364583
| 105
| 0.620913
| 809
| 6,790
| 5.016069
| 0.080346
| 0.069
| 0.022178
| 0.090685
| 0.903154
| 0.871365
| 0.857812
| 0.848448
| 0.785116
| 0.77723
| 0
| 0.019977
| 0.248012
| 6,790
| 191
| 106
| 35.549738
| 0.774775
| 0.139764
| 0
| 0.54321
| 0
| 0
| 0.145397
| 0.022003
| 0
| 0
| 0
| 0
| 0.197531
| 1
| 0.209877
| false
| 0.160494
| 0.049383
| 0
| 0.271605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
4ea129f6fb6686b442d2238251ed35e2a4eded4d
| 1,235
|
py
|
Python
|
exception.py
|
kebrick/pyucallerapi
|
90e099bb206e5def916927228006bcf7e755926a
|
[
"MIT"
] | null | null | null |
exception.py
|
kebrick/pyucallerapi
|
90e099bb206e5def916927228006bcf7e755926a
|
[
"MIT"
] | null | null | null |
exception.py
|
kebrick/pyucallerapi
|
90e099bb206e5def916927228006bcf7e755926a
|
[
"MIT"
] | null | null | null |
class uCallerException(Exception):
pass
class GetException(uCallerException):
"""Basic exception for errors thrown on get request."""
def __init__(self, name_class, name_method, message):
super().__init__(f"Class \"{name_class}\": Method \"{name_method}\" - {message}")
class SetSession(uCallerException):
"""Base exception for errors caused within a get couriers."""
def __init__(self, name_class, name_method, message):
super().__init__(f"Class {name_class}: Method - {name_method} - {message}")
class SetServiceId(uCallerException):
"""Base exception for errors caused within a get couriers."""
def __init__(self, name_class, name_method, message):
super().__init__(f"Class {name_class}: Method - {name_method} - {message}")
class SetKey(uCallerException):
"""Base exception for errors caused within a get couriers."""
def __init__(self, name_class, name_method, message, exit_now: int = None):
super().__init__(f"Class {name_class}: Method - {name_method} - {message}")
if exit_now is not None:
exit(exit_now)
class ParamSetException(uCallerException):
""""""
def __init__(self, name_class, name_method, message):
super().__init__(f"Class {name_class}: Method - {name_method} - {message}")
| 30.875
| 83
| 0.729555
| 157
| 1,235
| 5.33758
| 0.229299
| 0.107399
| 0.202864
| 0.089499
| 0.74105
| 0.74105
| 0.74105
| 0.74105
| 0.74105
| 0.74105
| 0
| 0
| 0.128745
| 1,235
| 39
| 84
| 31.666667
| 0.77881
| 0.175709
| 0
| 0.421053
| 0
| 0
| 0.247225
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0.052632
| 0
| 0
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 9
|
4ebc120943cb4a46be36712c48ecd451e0f09888
| 99
|
py
|
Python
|
libconform/base.py
|
jofas/conform
|
9f8dd3c7c607269529bf4d62a729ed2ca1880baa
|
[
"MIT"
] | 5
|
2020-02-10T13:30:06.000Z
|
2021-12-22T16:08:02.000Z
|
libconform/base.py
|
jofas/conform
|
9f8dd3c7c607269529bf4d62a729ed2ca1880baa
|
[
"MIT"
] | 1
|
2019-07-04T14:12:13.000Z
|
2020-06-16T16:05:02.000Z
|
libconform/base.py
|
jofas/conform
|
9f8dd3c7c607269529bf4d62a729ed2ca1880baa
|
[
"MIT"
] | null | null | null |
class NCMBase:
    """Base class for nonconformity measures; subclasses override both methods."""
    def fit(self, X, y):
        """Fit the measure on training data X with labels y."""
        pass
    def scores(self, X, y, cp):
        """Return one nonconformity score per example in X."""
        pass
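# --- Usage sketch (added for illustration; not from the original file) ---
# A minimal concrete nonconformity measure, assuming fit()/scores() carry the
# usual conformal-prediction semantics implied by the names; `cp` is accepted
# for interface compatibility and ignored in this sketch:
import numpy as np

class MeanDistanceNCM(NCMBase):
    def fit(self, X, y):
        X, y = np.asarray(X), np.asarray(y)
        self.means_ = {c: X[y == c].mean(axis=0) for c in np.unique(y)}
    def scores(self, X, y, cp):
        X, y = np.asarray(X), np.asarray(y)
        return np.array([np.linalg.norm(x - self.means_[c])
                         for x, c in zip(X, y)])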
| 14.142857
| 31
| 0.494949
| 15
| 99
| 3.266667
| 0.666667
| 0.204082
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.383838
| 99
| 6
| 32
| 16.5
| 0.803279
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
4ec15c7e6207984f5322a06ca6c503c76940eb52
| 6,940
|
py
|
Python
|
tp_gst.py
|
lightbooster/TP-GST-BERT-Tacotron2
|
aa07e81c1ea3ace02bec5ac70f25a545d1ff0eb1
|
[
"BSD-3-Clause"
] | null | null | null |
tp_gst.py
|
lightbooster/TP-GST-BERT-Tacotron2
|
aa07e81c1ea3ace02bec5ac70f25a545d1ff0eb1
|
[
"BSD-3-Clause"
] | null | null | null |
tp_gst.py
|
lightbooster/TP-GST-BERT-Tacotron2
|
aa07e81c1ea3ace02bec5ac70f25a545d1ff0eb1
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T20:08:56.000Z
|
2021-05-25T20:08:56.000Z
|
import torch
from torch import nn
from torch.nn import functional as F
from layers import LinearNorm
class TPCW(nn.Module):
"""
Text-Predicting Combination Weights of GST
"""
def __init__(self, hparams):
"""
constructs TPCW model
:param hparams: hyper parameters object
"""
super().__init__()
self.hidden_state_dim = hparams.tpcw_gru_hidden_state_dim
self.encoder_embedding_dim = hparams.encoder_embedding_dim + \
(hparams.bert_encoder_dim if hparams.tp_gst_use_bert else 0)
self.attention_heads_num = hparams.num_heads
self.token_num = hparams.token_num
self.gru = nn.GRU(input_size=self.encoder_embedding_dim,
hidden_size=self.hidden_state_dim,
num_layers=1,
batch_first=True)
self.fc_layer = LinearNorm(in_dim=self.hidden_state_dim,
out_dim=int(self.token_num * self.attention_heads_num))
self.soft_max_layer = nn.Softmax(dim=2)
def forward(self, inputs):
"""
forwarding through the model layers
:param inputs: encoder output shape of (batch_size, max_seq_len, embedding_dim)
:return: combination weights tensor shape of (batch_size, attention_heads_num, token_num)
"""
self.gru.flatten_parameters()
_, hidden_state_n = self.gru(inputs)
# hidden_state_n - tensor shape of (1, batch_size, hidden_state_dim)
# bring to shape (batch_size, hidden_state_dim)
hidden_state_n = hidden_state_n.squeeze(dim=0)
fc_output = self.fc_layer(hidden_state_n)
# fc_output - tensor shape of (batch_size, token_num * attention_heads_num)
# reshape to (batch_size, attention_heads_num, token_num)
fc_output = fc_output.reshape(-1, self.attention_heads_num, self.token_num)
w_combination = self.soft_max_layer(fc_output)
return w_combination
def inference(self, inputs):
"""
perform inference
:param inputs: encoder output shape of (batch_size, max_seq_len, embedding_dim)
:return: combination weights tensor shape of (batch_size, token_num)
"""
pass
class TPSE(nn.Module):
"""
Text-Predicting Style Embedding
"""
def __init__(self, hparams):
super().__init__()
self.hidden_state_dim = hparams.tpse_gru_hidden_state_dim
self.encoder_embedding_dim = hparams.encoder_embedding_dim + \
(hparams.bert_encoder_dim if hparams.tp_gst_use_bert else 0)
self.fc_layers = hparams.tpse_fc_layers
self.fc_layers_dim = hparams.tpse_fc_layer_dim
self.token_dim = hparams.token_embedding_size
self.gru = nn.GRU(input_size=self.encoder_embedding_dim,
hidden_size=self.hidden_state_dim,
num_layers=1,
batch_first=True)
self.fc_layers_model = None
if self.fc_layers < 1:
raise ValueError('hparams.fc_layers must be 1 or greater')
elif self.fc_layers == 1:
self.fc_layers_model = nn.Sequential(LinearNorm(self.hidden_state_dim, self.token_dim),
nn.Tanh())
else:
fc_layers_list = []
# input layer
fc_layers_list.append(LinearNorm(self.hidden_state_dim, self.fc_layers_dim))
fc_layers_list.append(nn.ReLU())
# hidden layers
for _ in range(self.fc_layers - 2):
fc_layers_list.append(LinearNorm(self.fc_layers_dim, self.fc_layers_dim))
fc_layers_list.append(nn.ReLU())
# output layer
fc_layers_list.append(LinearNorm(self.fc_layers_dim, self.token_dim))
fc_layers_list.append(nn.Tanh())
self.fc_layers_model = nn.Sequential(*fc_layers_list)
def forward(self, inputs):
"""
forwarding through the model layers
:param inputs: encoder output shape of (batch_size, max_seq_len, embedding_dim)
:return: style token tensor shape of (batch_size, token_dim)
"""
self.gru.flatten_parameters()
_, hidden_state_n = self.gru(inputs)
# hidden_state_n - tensor shape of (1, batch_size, hidden_state_dim)
# bring to shape (batch_size, hidden_state_dim)
hidden_state_n = hidden_state_n.squeeze(dim=0)
fc_output = self.fc_layers_model(hidden_state_n)
return fc_output
def inference(self, inputs):
"""
perform inference
:param inputs: encoder output shape of (batch_size, max_seq_len, embedding_dim)
:return: style token tensor shape of (batch_size, token_dim)
"""
pass
class TPSELinear(nn.Module):
"""
Text-Predicting Style Embedding (without an RNN layer)
"""
def __init__(self, hparams):
super().__init__()
self.encoder_embedding_dim = hparams.encoder_embedding_dim + \
(hparams.bert_encoder_dim if hparams.tp_gst_use_bert else 0)
self.fc_layers = hparams.tpse_linear_fc_layers
self.fc_layers_dim = hparams.tpse_linear_fc_layer_dim
self.token_dim = hparams.token_embedding_size
self.fc_layers_model = None
if self.fc_layers < 1:
raise ValueError('hparams.fc_layers must be 1 or greater')
elif self.fc_layers == 1:
self.fc_layers_model = nn.Sequential(LinearNorm(self.encoder_embedding_dim, self.token_dim),
nn.Tanh())
else:
fc_layers_list = []
# input layer
fc_layers_list.append(LinearNorm(self.encoder_embedding_dim, self.fc_layers_dim))
fc_layers_list.append(nn.ReLU())
# hidden layers
for _ in range(self.fc_layers - 2):
fc_layers_list.append(LinearNorm(self.fc_layers_dim, self.fc_layers_dim))
fc_layers_list.append(nn.ReLU())
# output layer
fc_layers_list.append(LinearNorm(self.fc_layers_dim, self.token_dim))
fc_layers_list.append(nn.Tanh())
self.fc_layers_model = nn.Sequential(*fc_layers_list)
def forward(self, inputs):
"""
forwarding through the model layers
:param inputs: encoder output shape of (batch_size, max_seq_len, embedding_dim)
:return: style token tensor shape of (batch_size, token_dim)
"""
fc_output = self.fc_layers_model(inputs)
return fc_output
def inference(self, inputs):
"""
perform inference
:param inputs: encoder output shape of (batch_size, max_seq_len, embedding_dim)
:return: style token tensor shape of (batch_size, token_dim)
"""
pass
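# --- Usage sketch (added for illustration; not from the original file) ---
# Driving TPSE with dummy hyper-parameters; the attribute names match those
# read in __init__ above, but the values are illustrative, not the authors'
# settings:
def _tpse_usage_sketch():
    from types import SimpleNamespace
    hparams = SimpleNamespace(
        tpse_gru_hidden_state_dim=128,
        encoder_embedding_dim=512,
        bert_encoder_dim=768,
        tp_gst_use_bert=False,  # BERT features off, so the input dim stays 512
        tpse_fc_layers=2,
        tpse_fc_layer_dim=256,
        token_embedding_size=256,
    )
    tpse = TPSE(hparams)
    encoder_out = torch.randn(4, 37, 512)  # (batch, max_seq_len, embedding_dim)
    style = tpse(encoder_out)              # -> (4, 256) style token
    return style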
| 39.431818
| 104
| 0.62781
| 875
| 6,940
| 4.635429
| 0.117714
| 0.09073
| 0.076923
| 0.051282
| 0.859714
| 0.847387
| 0.807446
| 0.755671
| 0.738905
| 0.738905
| 0
| 0.003863
| 0.29121
| 6,940
| 175
| 105
| 39.657143
| 0.820695
| 0.240346
| 0
| 0.705263
| 0
| 0
| 0.015354
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094737
| false
| 0.031579
| 0.042105
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
14f4e20f4bf8aa610dde7ba4000361be275fdb25
| 190
|
py
|
Python
|
project_2/interpreter/app/token.py
|
jelic98/raf_pp
|
7fcee2745bf3c47971a93d71fe5195d3bf29ea2d
|
[
"Apache-2.0"
] | 1
|
2020-10-14T14:35:41.000Z
|
2020-10-14T14:35:41.000Z
|
project_2/interpreter/app/token.py
|
jelic98/raf_pp
|
7fcee2745bf3c47971a93d71fe5195d3bf29ea2d
|
[
"Apache-2.0"
] | null | null | null |
project_2/interpreter/app/token.py
|
jelic98/raf_pp
|
7fcee2745bf3c47971a93d71fe5195d3bf29ea2d
|
[
"Apache-2.0"
] | null | null | null |
class Token():
def __init__(self, token_type, value):
self.token_type = token_type
self.value = value
def __str__(self):
return "<{} {}>".format(self.token_type, self.value)
| 23.75
| 55
| 0.668421
| 26
| 190
| 4.423077
| 0.384615
| 0.313043
| 0.33913
| 0.313043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178947
| 190
| 7
| 56
| 27.142857
| 0.737179
| 0
| 0
| 0
| 0
| 0
| 0.038251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
0914aec0a5b481fe72e5b016c37f835f45d0c080
| 4,016
|
py
|
Python
|
userbot/modules/salam.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/salam.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/salam.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Marshal-serialized (obfuscated) module by XVenom
#https://github.com/xvenom15
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\xca\x00\x00\x00d\x00d\x01l\x00Z\x00d\x00d\x01l\x01Z\x01d\x00d\x01l\x02Z\x02d\x00d\x02l\x02m\x03Z\x03\x01\x00d\x00d\x03l\x04m\x05Z\x05m\x06Z\x06\x01\x00d\x00d\x04l\x07m\x08Z\x08m\tZ\tm\nZ\n\x01\x00d\x00d\x05l\x0bm\x0cZ\x0c\x01\x00e\nr`e\re\n\x83\x01n\x06e\x06\x83\x00j\x0eZ\x0fe\x0cd\x06d\x07d\x08\x8d\x02d\td\n\x84\x00\x83\x01Z\x10e\x0cd\x06d\x0bd\x08\x8d\x02d\x0cd\n\x84\x00\x83\x01Z\x10e\x0cd\x06d\rd\x08\x8d\x02d\x0ed\n\x84\x00\x83\x01Z\x10e\x0cd\x06d\x0fd\x08\x8d\x02d\x10d\n\x84\x00\x83\x01Z\x10e\x08\xa0\x11d\x11d\x12i\x01\xa1\x01\x01\x00d\x01S\x00)\x13\xe9\x00\x00\x00\x00N)\x01\xda\x05sleep)\x02\xda\x0epython_version\xda\x05uname)\x03\xda\x08CMD_HELP\xda\tZALG_LIST\xda\nALIVE_NAME)\x01\xda\x08registerTz\r^P(?: |$)(.*))\x02Z\x08outgoingZ\x07patternc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00\xc3\x00\x00\x00sH\x00\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01}\x01t\x02d\x01\x83\x01\x01\x00|\x00\xa0\x03d\x02t\x04\x9b\x00d\x03\x9d\x03\xa1\x01I\x00d\x00H\x00\x01\x00t\x02d\x04\x83\x01\x01\x00|\x00\xa0\x03d\x05\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00\xa9\x06N\xe9\x01\x00\x00\x00z\x13**Hallo Semua Saya z\x02**\xe9\x02\x00\x00\x00u\x1a\x00\x00\x00`Assalamualaikum.....\xf0\x9f\x98\x9a`\xa9\x05\xda\rpattern_match\xda\x05groupr\x02\x00\x00\x00\xda\x04edit\xda\x0bDEFAULTUSER\xa9\x02Z\x05typew\xda\x07message\xa9\x00r\x13\x00\x00\x00\xda\x00\xda\ntypewriter\r\x00\x00\x00s\n\x00\x00\x00\x00\x02\x0c\x01\x08\x01\x18\x01\x08\x01r\x15\x00\x00\x00z\r^p(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00\xc3\x00\x00\x00sH\x00\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01}\x01t\x02d\x01\x83\x01\x01\x00|\x00\xa0\x03d\x02t\x04\x9b\x00d\x03\x9d\x03\xa1\x01I\x00d\x00H\x00\x01\x00t\x02d\x04\x83\x01\x01\x00|\x00\xa0\x03d\x05\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00r\t\x00\x00\x00r\x0c\x00\x00\x00r\x11\x00\x00\x00r\x13\x00\x00\x00r\x13\x00\x00\x00r\x14\x00\x00\x00r\x15\x00\x00\x00\x16\x00\x00\x00s\n\x00\x00\x00\x00\x02\x0c\x01\x08\x01\x18\x01\x08\x01z\r^L(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\xc3\x00\x00\x00s@\x00\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01}\x01t\x02d\x01\x83\x01\x01\x00|\x00\xa0\x03d\x02\xa1\x01I\x00d\x00H\x00\x01\x00t\x02d\x01\x83\x01\x01\x00|\x00\xa0\x03d\x03\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00)\x04Nr\n\x00\x00\x00\xfa$`Astaghfirulloh Jawab Salam Dong...`z\x17`Waallaikumsalam......`\xa9\x04r\r\x00\x00\x00r\x0e\x00\x00\x00r\x02\x00\x00\x00r\x0f\x00\x00\x00r\x11\x00\x00\x00r\x13\x00\x00\x00r\x13\x00\x00\x00r\x14\x00\x00\x00r\x15\x00\x00\x00\x1f\x00\x00\x00s\n\x00\x00\x00\x00\x02\x0c\x01\x08\x01\x10\x01\x08\x01z\r^l(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\xc3\x00\x00\x00s@\x00\x00\x00|\x00j\x00\xa0\x01d\x01\xa1\x01}\x01t\x02d\x01\x83\x01\x01\x00|\x00\xa0\x03d\x02\xa1\x01I\x00d\x00H\x00\x01\x00t\x02d\x01\x83\x01\x01\x00|\x00\xa0\x03d\x03\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00)\x04Nr\n\x00\x00\x00r\x16\x00\x00\x00z\x16`Waallaikumsalam.....`r\x17\x00\x00\x00r\x11\x00\x00\x00r\x13\x00\x00\x00r\x13\x00\x00\x00r\x14\x00\x00\x00r\x15\x00\x00\x00(\x00\x00\x00s\n\x00\x00\x00\x00\x02\x0c\x01\x08\x01\x10\x01\x08\x01Z\x05salamzA`P`\nUsage: Untuk Memberi salam.\n\n`L`\nUsage: Untuk Menjawab 
Salam.)\x12Z\x07asyncio\xda\x02re\xda\x04timer\x02\x00\x00\x00\xda\x08platformr\x03\x00\x00\x00r\x04\x00\x00\x00Z\x07userbotr\x05\x00\x00\x00r\x06\x00\x00\x00r\x07\x00\x00\x00Z\x0euserbot.eventsr\x08\x00\x00\x00\xda\x03strZ\x04noder\x10\x00\x00\x00r\x15\x00\x00\x00\xda\x06updater\x13\x00\x00\x00r\x13\x00\x00\x00r\x13\x00\x00\x00r\x14\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00s&\x00\x00\x00\x08\x01\x08\x01\x08\x01\x0c\x01\x10\x01\x14\x01\x0c\x03\x14\x03\n\x01\n\x08\n\x01\n\x08\n\x01\n\x08\n\x01\n\t\x04\x01\x02\x01\x02\xfe'))
| 1,004
| 3,945
| 0.755229
| 878
| 4,016
| 3.448747
| 0.192483
| 0.342801
| 0.261559
| 0.21004
| 0.569022
| 0.559115
| 0.540621
| 0.533355
| 0.506605
| 0.505614
| 0
| 0.362885
| 0.005727
| 4,016
| 4
| 3,945
| 1,004
| 0.395442
| 0.012948
| 0
| 0
| 0
| 0.5
| 0.989402
| 0.974262
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 13
|
0928d94875922df6c3a71123f407279263b86046
| 9,439
|
py
|
Python
|
compiler/front_end/write_inference_test.py
|
chloeyutianyi/emboss
|
ec9b566848d322e0afd598327a6e81a8c7953008
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
compiler/front_end/write_inference_test.py
|
chloeyutianyi/emboss
|
ec9b566848d322e0afd598327a6e81a8c7953008
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
compiler/front_end/write_inference_test.py
|
chloeyutianyi/emboss
|
ec9b566848d322e0afd598327a6e81a8c7953008
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ...emboss.front_end.write_inference."""
import unittest
from compiler.front_end import glue
from compiler.front_end import test_util
from compiler.front_end import write_inference
from compiler.util import ir_pb2
class WriteInferenceTest(unittest.TestCase):
def _make_ir(self, emb_text):
ir, unused_debug_info, errors = glue.parse_emboss_file(
"m.emb",
test_util.dict_file_reader({"m.emb": emb_text}),
stop_before_step="set_write_methods")
assert not errors, errors
return ir
def test_adds_physical_write_method(self):
ir = self._make_ir("struct Foo:\n"
" 0 [+1] UInt x\n")
self.assertEqual([], write_inference.set_write_methods(ir))
self.assertTrue(
ir.module[0].type[0].structure.field[0].write_method.physical)
def test_adds_read_only_write_method_to_non_alias_virtual(self):
ir = self._make_ir("struct Foo:\n"
" let x = 5\n")
self.assertEqual([], write_inference.set_write_methods(ir))
self.assertTrue(
ir.module[0].type[0].structure.field[0].write_method.read_only)
def test_adds_alias_write_method_to_alias_of_physical_field(self):
ir = self._make_ir("struct Foo:\n"
" let x = y\n"
" 0 [+1] UInt y\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[0]
self.assertTrue(field.write_method.HasField("alias"))
self.assertEqual(
"y", field.write_method.alias.path[0].canonical_name.object_path[-1])
def test_adds_alias_write_method_to_alias_of_alias_of_physical_field(self):
ir = self._make_ir("struct Foo:\n"
" let x = z\n"
" let z = y\n"
" 0 [+1] UInt y\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[0]
self.assertTrue(field.write_method.HasField("alias"))
self.assertEqual(
"z", field.write_method.alias.path[0].canonical_name.object_path[-1])
def test_adds_read_only_write_method_to_alias_of_read_only(self):
ir = self._make_ir("struct Foo:\n"
" let x = y\n"
" let y = 5\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[0]
self.assertTrue(field.write_method.read_only)
def test_adds_read_only_write_method_to_alias_of_alias_of_read_only(self):
ir = self._make_ir("struct Foo:\n"
" let x = z\n"
" let z = y\n"
" let y = 5\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[0]
self.assertTrue(field.write_method.read_only)
def test_adds_read_only_write_method_to_alias_of_parameter(self):
ir = self._make_ir("struct Foo(x: UInt:8):\n"
" let y = x\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[0]
self.assertTrue(field.write_method.read_only)
def test_adds_transform_write_method_to_base_value_field(self):
ir = self._make_ir("struct Foo:\n"
" 0 [+1] UInt x\n"
" let y = x + 50\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[1]
transform = field.write_method.transform
self.assertTrue(transform)
self.assertEqual(
"x",
transform.destination.path[0].canonical_name.object_path[-1])
self.assertEqual(ir_pb2.Function.SUBTRACTION,
transform.function_body.function.function)
arg0, arg1 = transform.function_body.function.args
self.assertEqual("$logical_value",
arg0.builtin_reference.canonical_name.object_path[0])
self.assertEqual("50", arg1.constant.value)
def test_adds_transform_write_method_to_negative_base_value_field(self):
ir = self._make_ir("struct Foo:\n"
" 0 [+1] UInt x\n"
" let y = x - 50\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[1]
transform = field.write_method.transform
self.assertTrue(transform)
self.assertEqual(
"x",
transform.destination.path[0].canonical_name.object_path[-1])
self.assertEqual(ir_pb2.Function.ADDITION,
transform.function_body.function.function)
arg0, arg1 = transform.function_body.function.args
self.assertEqual("$logical_value",
arg0.builtin_reference.canonical_name.object_path[0])
self.assertEqual("50", arg1.constant.value)
def test_adds_transform_write_method_to_reversed_base_value_field(self):
ir = self._make_ir("struct Foo:\n"
" 0 [+1] UInt x\n"
" let y = 50 + x\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[1]
transform = field.write_method.transform
self.assertTrue(transform)
self.assertEqual(
"x",
transform.destination.path[0].canonical_name.object_path[-1])
self.assertEqual(ir_pb2.Function.SUBTRACTION,
transform.function_body.function.function)
arg0, arg1 = transform.function_body.function.args
self.assertEqual("$logical_value",
arg0.builtin_reference.canonical_name.object_path[0])
self.assertEqual("50", arg1.constant.value)
def test_adds_transform_write_method_to_reversed_negative_base_value_field(
self):
ir = self._make_ir("struct Foo:\n"
" 0 [+1] UInt x\n"
" let y = 50 - x\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[1]
transform = field.write_method.transform
self.assertTrue(transform)
self.assertEqual(
"x",
transform.destination.path[0].canonical_name.object_path[-1])
self.assertEqual(ir_pb2.Function.SUBTRACTION,
transform.function_body.function.function)
arg0, arg1 = transform.function_body.function.args
self.assertEqual("50", arg0.constant.value)
self.assertEqual("$logical_value",
arg1.builtin_reference.canonical_name.object_path[0])
def test_adds_transform_write_method_to_nested_invertible_field(self):
ir = self._make_ir("struct Foo:\n"
" 0 [+1] UInt x\n"
" let y = 30 + (50 - x)\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[1]
transform = field.write_method.transform
self.assertTrue(transform)
self.assertEqual(
"x",
transform.destination.path[0].canonical_name.object_path[-1])
self.assertEqual(ir_pb2.Function.SUBTRACTION,
transform.function_body.function.function)
arg0, arg1 = transform.function_body.function.args
self.assertEqual("50", arg0.constant.value)
self.assertEqual(ir_pb2.Function.SUBTRACTION, arg1.function.function)
arg10, arg11 = arg1.function.args
self.assertEqual("$logical_value",
arg10.builtin_reference.canonical_name.object_path[0])
self.assertEqual("30", arg11.constant.value)
def test_does_not_add_transform_write_method_for_parameter_target(self):
ir = self._make_ir("struct Foo(x: UInt:8):\n"
" let y = 50 + x\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[0]
self.assertEqual("read_only", field.write_method.WhichOneof("method"))
def test_adds_transform_write_method_with_complex_auxiliary_subexpression(
self):
ir = self._make_ir("struct Foo:\n"
" 0 [+1] UInt x\n"
" let y = x - $max(Foo.$size_in_bytes, Foo.z)\n"
" let z = 500\n")
self.assertEqual([], write_inference.set_write_methods(ir))
field = ir.module[0].type[0].structure.field[1]
transform = field.write_method.transform
self.assertTrue(transform)
self.assertEqual(
"x",
transform.destination.path[0].canonical_name.object_path[-1])
self.assertEqual(ir_pb2.Function.ADDITION,
transform.function_body.function.function)
args = transform.function_body.function.args
self.assertEqual("$logical_value",
args[0].builtin_reference.canonical_name.object_path[0])
self.assertEqual(field.read_transform.function.args[1], args[1])
if __name__ == "__main__":
unittest.main()
| 43.497696
| 77
| 0.658968
| 1,253
| 9,439
| 4.717478
| 0.134078
| 0.109119
| 0.038065
| 0.033159
| 0.808831
| 0.795635
| 0.779733
| 0.762308
| 0.756894
| 0.718998
| 0
| 0.022352
| 0.222693
| 9,439
| 216
| 78
| 43.699074
| 0.78329
| 0.063142
| 0
| 0.718232
| 0
| 0
| 0.095864
| 0.00272
| 0
| 0
| 0
| 0
| 0.314917
| 1
| 0.082873
| false
| 0
| 0.027624
| 0
| 0.121547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
119ce26edc610780942723b4145c63843f36af4e
| 106
|
py
|
Python
|
tools/Polygraphy/polygraphy/backend/pyt/__init__.py
|
martellz/TensorRT
|
f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4
|
[
"Apache-2.0"
] | 4
|
2021-04-16T13:49:38.000Z
|
2022-01-16T08:58:07.000Z
|
tools/Polygraphy/polygraphy/backend/pyt/__init__.py
|
martellz/TensorRT
|
f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4
|
[
"Apache-2.0"
] | null | null | null |
tools/Polygraphy/polygraphy/backend/pyt/__init__.py
|
martellz/TensorRT
|
f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4
|
[
"Apache-2.0"
] | 2
|
2021-02-04T14:46:10.000Z
|
2021-02-04T14:56:08.000Z
|
from polygraphy.backend.pyt.loader import BaseLoadPyt
from polygraphy.backend.pyt.runner import PytRunner
| 35.333333
| 53
| 0.867925
| 14
| 106
| 6.571429
| 0.642857
| 0.304348
| 0.456522
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 106
| 2
| 54
| 53
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
119ea49519131a0ad71bb974356c388970cedac9
| 4,888
|
py
|
Python
|
picbackend/views/v2/case_management_module_views/individual_cm_step_views/default_enrollment_step_2_views/tools/create_update_delete.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
picbackend/views/v2/case_management_module_views/individual_cm_step_views/default_enrollment_step_2_views/tools/create_update_delete.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
picbackend/views/v2/case_management_module_views/individual_cm_step_views/default_enrollment_step_2_views/tools/create_update_delete.py
|
bbcawodu/careadvisors-backend
|
5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838
|
[
"MIT"
] | null | null | null |
import datetime
import pytz
from picbackend.views.utils import clean_int_value_from_dict_object
from picbackend.views.utils import clean_string_value_from_dict_object
def validate_put_rqst_params(rqst_body, rqst_errors):
validated_params = {
'rqst_action': clean_string_value_from_dict_object(rqst_body, "root", "db_action", rqst_errors)
}
rqst_action = validated_params['rqst_action']
if rqst_action == 'create':
validate_create_row_params(rqst_body, validated_params, rqst_errors)
elif rqst_action == 'update':
validated_params['id'] = clean_int_value_from_dict_object(rqst_body, "root", "id", rqst_errors)
validate_update_row_params(rqst_body, validated_params, rqst_errors)
elif rqst_action == 'delete':
validated_params['id'] = clean_int_value_from_dict_object(rqst_body, "root", "id", rqst_errors)
return validated_params
def validate_create_row_params(rqst_body, validated_params, rqst_errors):
validated_params['consumer_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"consumer_id",
rqst_errors
)
validated_params['navigator_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"navigator_id",
rqst_errors
)
validated_params['cm_client_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"cm_client_id",
rqst_errors,
none_allowed=True
)
validated_params['cm_sequence_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"cm_sequence_id",
rqst_errors,
none_allowed=True
)
if 'notes' in rqst_body:
validated_params['notes'] = clean_string_value_from_dict_object(
rqst_body,
"root",
"notes",
rqst_errors,
empty_string_allowed=True,
none_allowed=True
)
if "datetime_completed" in rqst_body:
datetime_completed = clean_string_value_from_dict_object(
rqst_body,
"root",
"datetime_completed",
rqst_errors,
none_allowed=True
)
validated_datetime_completed = None
if datetime_completed:
try:
validated_datetime_completed = datetime.datetime.strptime(datetime_completed, "%Y-%m-%dT%H:%M:%S").replace(tzinfo=pytz.UTC)
except ValueError:
rqst_errors.append(
'datetime_completed must be a properly formatted datetime string in UTC, eg. YYYY-MM-DDTHH:MM:SS. Value is : {}'.format(
datetime_completed)
)
validated_params['datetime_completed'] = validated_datetime_completed
def validate_update_row_params(rqst_body, validated_params, rqst_errors):
if 'consumer_id' in rqst_body:
validated_params['consumer_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"consumer_id",
rqst_errors
)
if 'navigator_id' in rqst_body:
validated_params['navigator_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"navigator_id",
rqst_errors
)
if 'cm_client_id' in rqst_body:
validated_params['cm_client_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"cm_client_id",
rqst_errors,
none_allowed=True
)
if "cm_sequence_id" in rqst_body:
validated_params['cm_sequence_id'] = clean_int_value_from_dict_object(
rqst_body,
"root",
"cm_sequence_id",
rqst_errors,
none_allowed=True
)
if 'notes' in rqst_body:
validated_params['notes'] = clean_string_value_from_dict_object(
rqst_body,
"root",
"notes",
rqst_errors,
empty_string_allowed=True,
none_allowed=True
)
if "datetime_completed" in rqst_body:
datetime_completed = clean_string_value_from_dict_object(
rqst_body,
"root",
"datetime_completed",
rqst_errors,
none_allowed=True
)
validated_datetime_completed = None
if datetime_completed:
try:
validated_datetime_completed = datetime.datetime.strptime(datetime_completed, "%Y-%m-%dT%H:%M:%S").replace(tzinfo=pytz.UTC)
except ValueError:
rqst_errors.append(
'datetime_completed must be a properly formatted datetime string in UTC, eg. YYYY-MM-DDTHH:MM:SS. Value is : {}'.format(
datetime_completed)
)
validated_params['datetime_completed'] = validated_datetime_completed
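# --- Usage sketch (added for illustration; not from the original file) ---
# A request body exercising the 'create' path; the field values are made up,
# and the behaviour of the clean_* helpers is assumed from their names:
def _validate_put_rqst_params_sketch():
    rqst_errors = []
    rqst_body = {
        "db_action": "create",
        "consumer_id": 1,
        "navigator_id": 2,
        "cm_client_id": None,
        "cm_sequence_id": None,
        "notes": "",
        "datetime_completed": "2019-01-01T12:00:00",
    }
    validated = validate_put_rqst_params(rqst_body, rqst_errors)
    # On success rqst_errors stays empty and validated holds the cleaned
    # values plus a UTC-aware datetime for 'datetime_completed'.
    return validated, rqst_errors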
| 32.805369
| 140
| 0.621727
| 549
| 4,888
| 5.083789
| 0.125683
| 0.080258
| 0.079183
| 0.115729
| 0.913293
| 0.913293
| 0.862773
| 0.853458
| 0.853458
| 0.838409
| 0
| 0
| 0.297872
| 4,888
| 148
| 141
| 33.027027
| 0.813228
| 0
| 0
| 0.728682
| 0
| 0.015504
| 0.154255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.031008
| 0
| 0.062016
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
11b8a76b644d8e8508d75241bb55c6197f6b6bb5
| 3,506
|
py
|
Python
|
tests/unit/test_profiler.py
|
ylathouris/bio
|
8261f6a730b46e783d54b562d6acc674b5b828ce
|
[
"MIT"
] | null | null | null |
tests/unit/test_profiler.py
|
ylathouris/bio
|
8261f6a730b46e783d54b562d6acc674b5b828ce
|
[
"MIT"
] | 1
|
2019-11-28T17:23:13.000Z
|
2020-07-31T21:19:16.000Z
|
tests/unit/test_profiler.py
|
ylathouris/bio
|
8261f6a730b46e783d54b562d6acc674b5b828ce
|
[
"MIT"
] | null | null | null |
from unittest import mock
import cProfile
import bio
def do_something():
sum([1, 1])
@mock.patch.object(cProfile.Profile, "dump_stats")
@mock.patch.object(cProfile.Profile, "print_stats")
def test_profiler_context_manager(mock_print, mock_dump):
"""
Test profiler context manager (with defaults).
This test demonstrates how to profile your code using the `profiler`
context manager. When used without any arguments, the output will
be written/printed to stdout.
"""
with bio.profiler():
do_something()
mock_print.assert_called_once()
mock_dump.assert_not_called()
@mock.patch.object(cProfile.Profile, "dump_stats")
@mock.patch.object(cProfile.Profile, "print_stats")
def test_profiler_context_manager_with_output_file(mock_print, mock_dump):
"""
Test profiler context manager with output file.
This test demonstrates how to use the `profiler` context manager.
In this case we're providing a file location for storing the
output data.
"""
path = "path/to/file.prof"
with bio.profiler(path):
do_something()
mock_print.assert_called_once()
mock_dump.assert_called_once_with(path)
@mock.patch.object(cProfile.Profile, "dump_stats")
@mock.patch.object(cProfile.Profile, "print_stats")
def test_profiler_context_manager_with_no_stdout(mock_print, mock_dump):
"""
Test profiler context manager with no stdout output.
This test demonstrates how to use the `profiler` context manager.
In this case we're providing the `quiet=True` option to prevent
the output from being written to stdout (i.e. the console).
"""
path = "path/to/file.prof"
with bio.profiler(path, quiet=True):
do_something()
mock_print.assert_not_called()
mock_dump.assert_called_once_with(path)
@mock.patch.object(cProfile.Profile, "dump_stats")
@mock.patch.object(cProfile.Profile, "print_stats")
def test_profiler_decorator(mock_print, mock_dump):
"""
Test profiler decorator.
This test demonstrates how to profile code using the `profile`
function decorator.
"""
@bio.profile()
def do_something():
sum([1, 1])
do_something()
mock_print.assert_called_once()
mock_dump.assert_not_called()
@mock.patch.object(cProfile.Profile, "dump_stats")
@mock.patch.object(cProfile.Profile, "print_stats")
def test_profiler_decorator_with_output_file(mock_print, mock_dump):
"""
Test profiler decorator with output file.
This test demonstrates how to profile code using the `profile`
function decorator. In this case we're providing a file location
for storing the output data.
"""
path = "path/to/file.prof"
@bio.profile(path)
def do_something():
sum([1, 1])
do_something()
mock_print.assert_called_once()
mock_dump.assert_called_once_with(path)
@mock.patch.object(cProfile.Profile, "dump_stats")
@mock.patch.object(cProfile.Profile, "print_stats")
def test_profiler_decorator_with_no_stdout(mock_print, mock_dump):
"""
Test profiler decorator with no stdout output.
This test demonstrates how to use the `profiler` decorator.
In this case we're providing the `quiet=True` option to prevent
the output from being written to stdout (i.e. the console).
"""
path = "path/to/file.prof"
@bio.profile(path, quiet=True)
def do_something():
sum([1, 1])
do_something()
mock_print.assert_not_called()
mock_dump.assert_called_once_with(path)
| 27.825397
| 74
| 0.718768
| 490
| 3,506
| 4.936735
| 0.146939
| 0.044647
| 0.074411
| 0.114097
| 0.916081
| 0.916081
| 0.894998
| 0.863993
| 0.852418
| 0.778421
| 0
| 0.002786
| 0.181118
| 3,506
| 125
| 75
| 28.048
| 0.839777
| 0.33571
| 0
| 0.736842
| 0
| 0
| 0.089401
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.175439
| false
| 0
| 0.052632
| 0
| 0.22807
| 0.315789
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eed4f52505f07451403ae784d9ad203682f862f3
| 182
|
py
|
Python
|
postcodeapi/utils.py
|
roedesh/postcodeapi
|
53a6a5578d9dbf0566ae0712ed33c596b2dc6e64
|
[
"MIT"
] | null | null | null |
postcodeapi/utils.py
|
roedesh/postcodeapi
|
53a6a5578d9dbf0566ae0712ed33c596b2dc6e64
|
[
"MIT"
] | 7
|
2018-11-23T15:00:55.000Z
|
2019-04-21T19:47:51.000Z
|
postcodeapi/utils.py
|
roedesh/postcodeapi
|
53a6a5578d9dbf0566ae0712ed33c596b2dc6e64
|
[
"MIT"
] | null | null | null |
import re
DUTCH_POSTAL_CODE_REGEX = re.compile(r"^[1-9][0-9]{3}\s?[a-zA-Z]{2}$")
def is_valid_postal_code(postal_code):
return bool(DUTCH_POSTAL_CODE_REGEX.match(postal_code))
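if __name__ == "__main__":
    # Usage sketch (added for illustration; not from the original file):
    assert is_valid_postal_code("1234 AB")
    assert is_valid_postal_code("5678cd")       # the space is optional
    assert not is_valid_postal_code("0123 AB")  # may not start with 0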
| 22.75
| 69
| 0.736264
| 34
| 182
| 3.617647
| 0.647059
| 0.406504
| 0.243902
| 0.325203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036145
| 0.087912
| 182
| 7
| 70
| 26
| 0.704819
| 0
| 0
| 0
| 0
| 0.25
| 0.159341
| 0.159341
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
e113067abb06fd2a23cd03c15e8ce879b9635f78
| 10,919
|
py
|
Python
|
LAMARCK_ML/nn_framework/nn_framework_test.py
|
JonasDHomburg/LAMARCK
|
0e372c908ff59effc6fd68e6477d04c4d89e6c26
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2019-09-20T08:03:47.000Z
|
2021-05-10T11:02:09.000Z
|
LAMARCK_ML/nn_framework/nn_framework_test.py
|
JonasDHomburg/LAMARCK_ML
|
0e372c908ff59effc6fd68e6477d04c4d89e6c26
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
LAMARCK_ML/nn_framework/nn_framework_test.py
|
JonasDHomburg/LAMARCK_ML
|
0e372c908ff59effc6fd68e6477d04c4d89e6c26
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import unittest
import os
import numpy as np
import tensorflow as tf
from sklearn.datasets import make_classification
from LAMARCK_ML.data_util import TypeShape, IOLabel, DFloat, Shape, DimNames
from LAMARCK_ML.datasets import UncorrelatedSupervised
from LAMARCK_ML.individuals import ClassifierIndividualACDG, ClassifierIndividualOPACDG
if tf.__version__ == '1.12.0':
from LAMARCK_ML.nn_framework.nvidia_tensorflow_1_12_0 import NVIDIATensorFlow
else:
from LAMARCK_ML.nn_framework.nvidia_tensorflow import NVIDIATensorFlow
from LAMARCK_ML.architectures.functions import *
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
class TestNVIDIATensorFlowFramework(unittest.TestCase):
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
def test_MLP_Dense_Merge(self):
train_samples = 1000
data_X, data_Y = make_classification(n_samples=train_samples,
n_features=20,
n_classes=5,
n_informative=4,
)
data_Y = tf.keras.utils.to_categorical(data_Y)
data_X, data_Y = np.asarray(data_X), np.asarray(data_Y)
train_X, test_X = data_X[:int(train_samples * .9), :], data_X[int(train_samples * .9):, :]
train_Y, test_Y = data_Y[:int(train_samples * .9), :], data_Y[int(train_samples * .9):, :]
batch = None
dataset = UncorrelatedSupervised(train_X=train_X,
train_Y=train_Y,
test_X=test_X,
test_Y=test_Y,
batch=batch,
typeShapes={IOLabel.DATA: TypeShape(DFloat, Shape((DimNames.UNITS, 20))),
IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 5)))},
name='Dataset')
ci = ClassifierIndividualACDG(**{
ClassifierIndividualACDG.arg_DATA_NTS: dict(
[(label, (nts, dataset.id_name)) for label, nts in dataset.outputs.items()]),
ClassifierIndividualACDG.arg_NN_FUNCTIONS: [Dense, Merge],
})
NN = ci.network
f_ids = dict([(_id, None) for _, _id in NN.inputs.values()])
for _f in NN.functions:
f_ids[_f.id_name] = _f
for _f in NN.functions:
for _f_input, (other_output, other_id) in _f.inputs.items():
if other_id not in f_ids:
self.assertTrue(False)
stack = [f_id for _, f_id in NN.output_mapping.values()]
required_ids = set()
while stack:
f_id = stack.pop()
required_ids.add(f_id)
f_ = f_ids.get(f_id)
if f_ is not None:
stack.extend([f_id for _, f_id in f_.inputs.values()])
self.assertSetEqual(required_ids, set(f_ids.keys()))
framework = NVIDIATensorFlow(**{
NVIDIATensorFlow.arg_DATA_SETS: [dataset],
})
ci.build_instance(framework)
framework.accuracy(ci)
framework.time()
framework.memory()
# framework.flops_per_sample()
# framework.parameters()
framework.reset()
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
def test_MLP_Dense_Merge_mutate(self):
train_samples = 1000
data_X, data_Y = make_classification(n_samples=train_samples,
n_features=20,
n_classes=5,
n_informative=4,
)
data_Y = tf.keras.utils.to_categorical(data_Y)
data_X, data_Y = np.asarray(data_X), np.asarray(data_Y)
train_X, test_X = data_X[:int(train_samples * .9), :], data_X[int(train_samples * .9):, :]
train_Y, test_Y = data_Y[:int(train_samples * .9), :], data_Y[int(train_samples * .9):, :]
batch = None
dataset = UncorrelatedSupervised(train_X=train_X,
train_Y=train_Y,
test_X=test_X,
test_Y=test_Y,
batch=batch,
typeShapes={IOLabel.DATA: TypeShape(DFloat, Shape((DimNames.UNITS, 20))),
IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 5)))},
name='Dataset')
ci = ClassifierIndividualACDG(**{
ClassifierIndividualACDG.arg_DATA_NTS: dict(
[(label, (nts, dataset.id_name)) for label, nts in dataset.outputs.items()]),
ClassifierIndividualACDG.arg_NN_FUNCTIONS: [Dense, Merge],
})
ci = ci.mutate(1)[0]
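# mutate(1) returns a list of offspring; validate the first mutant's graph
# with the same connectivity checks as in the previous test.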
NN = ci.network
f_ids = dict([(_id, None) for _, _id in NN.inputs.values()])
for _f in NN.functions:
f_ids[_f.id_name] = _f
for _f in NN.functions:
for _f_input, (other_output, other_id) in _f.inputs.items():
if other_id not in f_ids:
self.fail('function input references unknown function id: %s' % other_id)
stack = [f_id for _, f_id in NN.output_mapping.values()]
required_ids = set()
while stack:
f_id = stack.pop()
required_ids.add(f_id)
f_ = f_ids.get(f_id)
if f_ is not None:
stack.extend([f_id for _, f_id in f_.inputs.values()])
self.assertSetEqual(required_ids, set(f_ids.keys()))
framework = NVIDIATensorFlow(**{
NVIDIATensorFlow.arg_DATA_SETS: [dataset],
})
ci.build_instance(framework)
framework.accuracy(ci)
framework.time()
framework.memory()
# framework.flops_per_sample()
# framework.parameters()
framework.reset()
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
def test_Conv_Flatten_Pool_Dense_Merge(self):
train_samples = 1000
data_X, data_Y = make_classification(n_samples=train_samples,
n_features=3072,
n_classes=5,
n_informative=4,
)
data_X = data_X.reshape((train_samples, 32, 32, 3))
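# 3072 features = 32 * 32 * 3, reshaped into image tensors so Conv2D can consume them.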
data_Y = tf.keras.utils.to_categorical(data_Y)
data_X, data_Y = np.asarray(data_X), np.asarray(data_Y)
train_X, test_X = data_X[:int(train_samples * .9), :], data_X[int(train_samples * .9):, :]
train_Y, test_Y = data_Y[:int(train_samples * .9), :], data_Y[int(train_samples * .9):, :]
batch = None
dataset = UncorrelatedSupervised(train_X=train_X,
train_Y=train_Y,
test_X=test_X,
test_Y=test_Y,
batch=batch,
typeShapes={IOLabel.DATA: TypeShape(DFloat, Shape((DimNames.HEIGHT, 32),
(DimNames.WIDTH, 32),
(DimNames.CHANNEL, 3))),
IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 5)))},
name='Dataset')
ci = ClassifierIndividualACDG(**{
ClassifierIndividualACDG.arg_DATA_NTS: dict(
[(label, (nts, dataset.id_name)) for label, nts in dataset.outputs.items()]),
ClassifierIndividualACDG.arg_NN_FUNCTIONS: [Conv2D, Flatten, Dense, Merge],
ClassifierIndividualACDG.arg_MAX_NN_DEPTH: 10,
})
framework = NVIDIATensorFlow(**{
NVIDIATensorFlow.arg_DATA_SETS: [dataset],
})
ci.build_instance(framework)
framework.accuracy(ci)
framework.time()
framework.memory()
# framework.flops_per_sample()
# framework.parameters()
framework.reset()
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
def test_Conv_Flatten_Pool_Dense_Merge_mutate_recombine(self):
train_samples = 1000
data_X, data_Y = make_classification(n_samples=train_samples,
n_features=3072,
n_classes=5,
n_informative=4,
)
data_X = data_X.reshape((train_samples, 32, 32, 3))
data_Y = tf.keras.utils.to_categorical(data_Y)
data_X, data_Y = np.asarray(data_X), np.asarray(data_Y)
train_X, test_X = data_X[:int(train_samples * .9), :], data_X[int(train_samples * .9):, :]
train_Y, test_Y = data_Y[:int(train_samples * .9), :], data_Y[int(train_samples * .9):, :]
batch = None
dataset = UncorrelatedSupervised(train_X=train_X,
train_Y=train_Y,
test_X=test_X,
test_Y=test_Y,
batch=batch,
typeShapes={IOLabel.DATA: TypeShape(DFloat, Shape((DimNames.HEIGHT, 32),
(DimNames.WIDTH, 32),
(DimNames.CHANNEL, 3))),
IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 5)))},
name='Dataset')
ci = ClassifierIndividualACDG(**{
ClassifierIndividualACDG.arg_DATA_NTS: dict(
[(label, (nts, dataset.id_name)) for label, nts in dataset.outputs.items()]),
ClassifierIndividualACDG.arg_NN_FUNCTIONS: [Conv2D, Pooling2D, Flatten, Dense, Merge],
ClassifierIndividualACDG.arg_MAX_NN_DEPTH: 10,
})
ci = ci.mutate(1)[0]
framework = NVIDIATensorFlow(**{
NVIDIATensorFlow.arg_DATA_SETS: [dataset],
})
ci.build_instance(framework)
state = ci.train_instance(framework)
ci.update_state(**state)
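# train_instance returns a state dict that is folded back into the individual,
# which populates network.variable_pool and makes the recombination below possible.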
self.assertTrue(isinstance(framework.accuracy(None), float))
self.assertTrue(isinstance(framework.time(), float))
self.assertTrue(isinstance(framework.memory(), float))
# self.assertTrue(isinstance(framework.flops_per_sample(), float))
# self.assertTrue(isinstance(framework.parameters(), float))
framework.reset()
self.assertGreater(len(ci.network.variable_pool), 0)
ci2 = ClassifierIndividualACDG(**{
ClassifierIndividualACDG.arg_DATA_NTS: dict(
[(label, (nts, dataset.id_name)) for label, nts in dataset.outputs.items()]),
ClassifierIndividualACDG.arg_NN_FUNCTIONS: [Conv2D, Pooling2D, Flatten, Dense, Merge],
ClassifierIndividualACDG.arg_MAX_NN_DEPTH: 10,
})
ci.build_instance(framework)
framework.reset()
ci_rec = ci.recombine(ci2)[0]
self.assertGreater(len(ci_rec.network.variable_pool), 0)
| 43.501992
| 112
| 0.567268
| 1,212
| 10,919
| 4.843234
| 0.122112
| 0.02385
| 0.040886
| 0.043612
| 0.875639
| 0.840716
| 0.840716
| 0.827087
| 0.827087
| 0.817547
| 0
| 0.015243
| 0.321092
| 10,919
| 250
| 113
| 43.676
| 0.776609
| 0.025552
| 0
| 0.846154
| 0
| 0
| 0.018249
| 0
| 0
| 0
| 0
| 0
| 0.043269
| 1
| 0.019231
| false
| 0
| 0.052885
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0145743b1ed2a13da7c4bd03d3420606ebed67e4
| 105
|
py
|
Python
|
backend/beedare/user_information/__init__.py
|
gijs3ntius/BeeDare
|
9ad5a93dad9b531b332aeb58f9b97e98585bc1ac
|
[
"Apache-2.0"
] | null | null | null |
backend/beedare/user_information/__init__.py
|
gijs3ntius/BeeDare
|
9ad5a93dad9b531b332aeb58f9b97e98585bc1ac
|
[
"Apache-2.0"
] | 17
|
2020-06-05T18:27:11.000Z
|
2022-03-11T23:24:50.000Z
|
backend/beedare/user_information/__init__.py
|
gijsentius/BeeDare
|
9ad5a93dad9b531b332aeb58f9b97e98585bc1ac
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint
user_info_blueprint = Blueprint('user_info', __name__)
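# Import views only after the blueprint exists so the view module can register
# its routes on user_info_blueprint without a circular import.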
from . import views
| 17.5
| 54
| 0.8
| 14
| 105
| 5.5
| 0.571429
| 0.337662
| 0.441558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 105
| 5
| 55
| 21
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
6d728c8728d85b92bd38bf9c9aa4625485c03e1f
| 92
|
py
|
Python
|
parameters_8020.py
|
AnujBrandy/AdsIdeaInQBO
|
561b096a0e5db3acddbf9f1fc57d29ac8fe1791d
|
[
"BSD-3-Clause"
] | 2
|
2015-07-05T12:25:08.000Z
|
2015-07-05T15:39:32.000Z
|
parameters_8020.py
|
AnujBrandy/AdsIdeaInQBO
|
561b096a0e5db3acddbf9f1fc57d29ac8fe1791d
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8020.py
|
AnujBrandy/AdsIdeaInQBO
|
561b096a0e5db3acddbf9f1fc57d29ac8fe1791d
|
[
"BSD-3-Clause"
] | null | null | null |
password="pbkdf2(1000,20,sha512)$a652e2e1499cdf8c$3eb3c6d91c3eb578ef19cebe65b13866f70da95b"
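# Presumably a web2py-style CRYPT hash: pbkdf2(iterations,key_length,digest)$salt$hex_digest.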
| 46
| 91
| 0.891304
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.450549
| 0.01087
| 92
| 1
| 92
| 92
| 0.450549
| 0
| 0
| 0
| 0
| 0
| 0.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
0996bc8cb8466dd2c8911b023022afa3ba529f98
| 3,967
|
py
|
Python
|
worker/tests/unit/tasks/test_auth.py
|
wattlecloud/foundation-server
|
e1467d192a7729fa4f116c80dcd001bfd58662e8
|
[
"Apache-2.0"
] | null | null | null |
worker/tests/unit/tasks/test_auth.py
|
wattlecloud/foundation-server
|
e1467d192a7729fa4f116c80dcd001bfd58662e8
|
[
"Apache-2.0"
] | 1
|
2021-07-20T00:28:27.000Z
|
2021-07-20T00:28:27.000Z
|
worker/tests/unit/tasks/test_auth.py
|
wattlecloud/foundation-server
|
e1467d192a7729fa4f116c80dcd001bfd58662e8
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import ANY
from wattle.core.models.db.user import User
from wattle.worker.config import config as worker_config
from wattle.worker.tasks.auth import email_password_reset, email_verify
def test_email_verify_user_does_not_exist(mocker, monkeypatch):
session = mocker.patch("wattle.worker.tasks.auth.session_scope")
session.return_value.__enter__.return_value = "session"
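# session_scope is used as a context manager, so its __enter__ is patched to
# control the session object the task receives.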
mocker.patch.object(User, "get_by_email")
User.get_by_email.return_value = None
mock_create_verify_token = mocker.patch(
"wattle.worker.tasks.auth.create_verify_token"
)
email_verify("test@test.com")
User.get_by_email.assert_called_with("session", "test@test.com")
mock_create_verify_token.assert_not_called()
def test_email_verify(mocker, monkeypatch):
session = mocker.patch("wattle.worker.tasks.auth.session_scope")
session.return_value.__enter__.return_value = "session"
mocker.patch.object(User, "get_by_email")
user = User(email="test@test.com", id="user-id")
User.get_by_email.return_value = user
mock_create_verify_token = mocker.patch(
"wattle.worker.tasks.auth.create_verify_token"
)
mock_create_verify_token.return_value = "token"
mock_create_message = mocker.patch(
"wattle.worker.tasks.auth.create_message"
)
mock_create_message.return_value = "an_email"
mock_send_email = mocker.patch(
"wattle.worker.tasks.auth.send_email"
)
email_verify("test@test.com")
User.get_by_email.assert_called_with("session", "test@test.com")
mock_create_verify_token.assert_called_with(id=user.id)
# TODO: Properly test messages when templating is implemented
mock_create_message.assert_called_with(
f"{worker_config.FROM_EMAIL_NAME} <{worker_config.FROM_EMAIL}>",
user.email,
"Wattle - Verification",
ANY,
ANY
)
mock_send_email.assert_called_with(user.email, mock_create_message.return_value)
def test_email_password_reset_user_does_not_exist(mocker, monkeypatch):
session = mocker.patch("wattle.worker.tasks.auth.session_scope")
session.return_value.__enter__.return_value = "session"
mocker.patch.object(User, "get_by_email")
User.get_by_email.return_value = None
mock_create_password_reset_token = mocker.patch(
"wattle.worker.tasks.auth.create_password_reset_token"
)
email_password_reset("test@test.com")
User.get_by_email.assert_called_with("session", "test@test.com")
mock_create_password_reset_token.assert_not_called()
def test_email_password_reset(mocker, monkeypatch):
session = mocker.patch("wattle.worker.tasks.auth.session_scope")
session.return_value.__enter__.return_value = "session"
mocker.patch.object(User, "get_by_email")
user = User(
id="user-id", email="test@test.com", hashed_password="hashed-pass"
)
User.get_by_email.return_value = user
mock_create_password_reset_token = mocker.patch(
"wattle.worker.tasks.auth.create_password_reset_token"
)
mock_create_password_reset_token.return_value = "token"
mock_create_message = mocker.patch(
"wattle.worker.tasks.auth.create_message"
)
mock_create_message.return_value = "an_email"
mock_send_email = mocker.patch(
"wattle.worker.tasks.auth.send_email"
)
email_password_reset("test@test.com")
User.get_by_email.assert_called_with("session", "test@test.com")
mock_create_password_reset_token.assert_called_with(
id=user.id, hashed_password="hashed-pass"
)
# TODO: Properly test messages when templating is implemented
mock_create_message.assert_called_with(
f"{worker_config.FROM_EMAIL_NAME} <{worker_config.FROM_EMAIL}>",
user.email,
"Wattle - Password Reset",
ANY,
ANY
)
mock_send_email.assert_called_with(user.email, mock_create_message.return_value)
| 31.736
| 84
| 0.736073
| 535
| 3,967
| 5.082243
| 0.11215
| 0.072821
| 0.08128
| 0.100405
| 0.870541
| 0.848106
| 0.848106
| 0.819419
| 0.819419
| 0.798088
| 0
| 0.000602
| 0.161835
| 3,967
| 124
| 85
| 31.991935
| 0.817143
| 0.029997
| 0
| 0.613636
| 0
| 0
| 0.247594
| 0.158648
| 0
| 0
| 0
| 0.008065
| 0.136364
| 1
| 0.045455
| false
| 0.170455
| 0.068182
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
09d7238b67b1a62451ddf3e03c3d506c818b2f9c
| 74,545
|
py
|
Python
|
fhir/resources/STU3/tests/test_plandefinition.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/STU3/tests/test_plandefinition.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/STU3/tests/test_plandefinition.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/PlanDefinition
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import plandefinition
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class PlanDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
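# Load a JSON fixture from FHIR_UNITTEST_DATADIR (or the working directory)
# and parse it into a PlanDefinition resource.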
datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
js = json.load(handle)
self.assertEqual("PlanDefinition", js["resourceType"])
return plandefinition.PlanDefinition(js)
def testPlanDefinition1(self):
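# Round-trip check: parse the fixture, validate it, serialize via as_json(),
# re-parse the JSON, and validate the copy again.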
inst = self.instantiate_from("plandefinition-example-kdn5-simplified.json")
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition1(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition1(inst2)
def implPlanDefinition1(self, inst):
self.assertEqual(
force_bytes(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[0]
.extension[0]
.extension[0]
.url
),
force_bytes("day"),
)
self.assertEqual(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[0]
.extension[0]
.extension[0]
.valueInteger,
1,
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[0]
.extension[0]
.extension[1]
.url
),
force_bytes("day"),
)
self.assertEqual(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[0]
.extension[0]
.extension[1]
.valueInteger,
8,
)
self.assertEqual(
force_bytes(
inst.action[0].action[0].action[0].action[0].action[0].extension[0].url
),
force_bytes("http://hl7.org/fhir/StructureDefinition/timing-daysOfCycle"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].action[0].action[0].id),
force_bytes("action-1"),
)
self.assertEqual(
force_bytes(
inst.action[0].action[0].action[0].action[0].action[0].textEquivalent
),
force_bytes("Gemcitabine 1250 mg/m² IV over 30 minutes on days 1 and 8"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[1]
.extension[0]
.extension[0]
.url
),
force_bytes("day"),
)
self.assertEqual(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[1]
.extension[0]
.extension[0]
.valueInteger,
1,
)
self.assertEqual(
force_bytes(
inst.action[0].action[0].action[0].action[0].action[1].extension[0].url
),
force_bytes("http://hl7.org/fhir/StructureDefinition/timing-daysOfCycle"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].action[0].action[1].id),
force_bytes("action-2"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[1]
.relatedAction[0]
.actionId
),
force_bytes("action-1"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[0]
.action[0]
.action[0]
.action[1]
.relatedAction[0]
.relationship
),
force_bytes("concurrent-with-start"),
)
self.assertEqual(
force_bytes(
inst.action[0].action[0].action[0].action[0].action[1].textEquivalent
),
force_bytes("CARBOplatin AUC 5 IV over 30 minutes on Day 1"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].action[0].id),
force_bytes("cycle-definition-1"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].action[0].textEquivalent),
force_bytes("21-day cycle for 6 cycles"),
)
self.assertEqual(
inst.action[0].action[0].action[0].action[0].timingTiming.repeat.count, 6
)
self.assertEqual(
inst.action[0].action[0].action[0].action[0].timingTiming.repeat.duration,
21,
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[0]
.action[0]
.action[0]
.timingTiming.repeat.durationUnit
),
force_bytes("d"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].groupingBehavior),
force_bytes("sentence-group"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].selectionBehavior),
force_bytes("exactly-one"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].selectionBehavior), force_bytes("all")
)
self.assertEqual(
force_bytes(inst.action[0].selectionBehavior), force_bytes("exactly-one")
)
self.assertEqual(inst.approvalDate.date, FHIRDate("2016-07-27").date)
self.assertEqual(inst.approvalDate.as_json(), "2016-07-27")
self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("1111"))
self.assertEqual(force_bytes(inst.contained[1].id), force_bytes("2222"))
self.assertEqual(
force_bytes(inst.contributor[0].name), force_bytes("Lee Surprenant")
)
self.assertEqual(force_bytes(inst.contributor[0].type), force_bytes("author"))
self.assertEqual(
force_bytes(inst.copyright), force_bytes("All rights reserved.")
)
self.assertTrue(inst.experimental)
self.assertEqual(force_bytes(inst.id), force_bytes("KDN5"))
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://example.org/ordertemplates"),
)
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("KDN5"))
self.assertEqual(inst.lastReviewDate.date, FHIRDate("2016-07-27").date)
self.assertEqual(inst.lastReviewDate.as_json(), "2016-07-27")
self.assertEqual(
force_bytes(inst.publisher),
force_bytes("National Comprehensive Cancer Network, Inc."),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].display),
force_bytes("NCCN Guidelines for Kidney Cancer. V.2.2016"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].url),
force_bytes(
"http://www.example.org/professionals/physician_gls/PDF/kidney.pdf"
),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[1].citation),
force_bytes("Oudard S, et al. J Urol. 2007;177(5):1698-702"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[1].type), force_bytes("citation")
)
self.assertEqual(
force_bytes(inst.relatedArtifact[1].url),
force_bytes("http://www.ncbi.nlm.nih.gov/pubmed/17437788"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("additional"))
self.assertEqual(
force_bytes(inst.title), force_bytes("Gemcitabine/CARBOplatin")
)
self.assertEqual(
force_bytes(inst.type.text), force_bytes("Chemotherapy Order Template")
)
self.assertEqual(
force_bytes(inst.useContext[0].code.code),
force_bytes("treamentSetting-or-diseaseStatus"),
)
self.assertEqual(
force_bytes(inst.useContext[0].code.system),
force_bytes("http://example.org/fhir/CodeSystem/indications"),
)
self.assertEqual(
force_bytes(inst.useContext[0].extension[0].url),
force_bytes("http://hl7.org/fhir/StructureDefinition/usagecontext-group"),
)
self.assertEqual(
force_bytes(inst.useContext[0].extension[0].valueString), force_bytes("A")
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.text),
force_bytes("Metastatic"),
)
self.assertEqual(
force_bytes(inst.useContext[1].code.code),
force_bytes("disease-or-histology"),
)
self.assertEqual(
force_bytes(inst.useContext[1].code.system),
force_bytes("http://example.org/fhir/CodeSystem/indications"),
)
self.assertEqual(
force_bytes(inst.useContext[1].extension[0].url),
force_bytes("http://hl7.org/fhir/StructureDefinition/usagecontext-group"),
)
self.assertEqual(
force_bytes(inst.useContext[1].extension[0].valueString), force_bytes("A")
)
self.assertEqual(
force_bytes(inst.useContext[1].valueCodeableConcept.text),
force_bytes("Collecting Duct/Medullary Subtypes"),
)
self.assertEqual(
force_bytes(inst.useContext[2].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[2].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[2].extension[0].url),
force_bytes("http://hl7.org/fhir/StructureDefinition/usagecontext-group"),
)
self.assertEqual(
force_bytes(inst.useContext[2].extension[0].valueString), force_bytes("A")
)
self.assertEqual(
force_bytes(inst.useContext[2].valueCodeableConcept.text),
force_bytes("Kidney Cancer"),
)
self.assertEqual(
force_bytes(inst.useContext[3].code.code),
force_bytes("treatmentSetting-or-diseaseStatus"),
)
self.assertEqual(
force_bytes(inst.useContext[3].code.system),
force_bytes("http://example.org/fhir/CodeSystem/indications"),
)
self.assertEqual(
force_bytes(inst.useContext[3].extension[0].url),
force_bytes("http://hl7.org/fhir/StructureDefinition/usagecontext-group"),
)
self.assertEqual(
force_bytes(inst.useContext[3].extension[0].valueString), force_bytes("B")
)
self.assertEqual(
force_bytes(inst.useContext[3].valueCodeableConcept.text),
force_bytes("Relapsed"),
)
self.assertEqual(
force_bytes(inst.useContext[4].code.code),
force_bytes("disease-or-histology"),
)
self.assertEqual(
force_bytes(inst.useContext[4].code.system),
force_bytes("http://example.org/fhir/CodeSystem/indications"),
)
self.assertEqual(
force_bytes(inst.useContext[4].extension[0].url),
force_bytes("http://hl7.org/fhir/StructureDefinition/usagecontext-group"),
)
self.assertEqual(
force_bytes(inst.useContext[4].extension[0].valueString), force_bytes("B")
)
self.assertEqual(
force_bytes(inst.useContext[4].valueCodeableConcept.text),
force_bytes("Collecting Duct/Medullary Subtypes"),
)
self.assertEqual(
force_bytes(inst.useContext[5].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[5].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[5].extension[0].url),
force_bytes("http://hl7.org/fhir/StructureDefinition/usagecontext-group"),
)
self.assertEqual(
force_bytes(inst.useContext[5].extension[0].valueString), force_bytes("B")
)
self.assertEqual(
force_bytes(inst.useContext[5].valueCodeableConcept.text),
force_bytes(
"Kidney Cancer – Collecting Duct/Medullary Subtypes - Metastatic"
),
)
self.assertEqual(force_bytes(inst.version), force_bytes("1"))
def testPlanDefinition2(self):
inst = self.instantiate_from("plandefinition-options-example.json")
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition2(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition2(inst2)
def implPlanDefinition2(self, inst):
self.assertEqual(
force_bytes(inst.action[0].action[0].id), force_bytes("medication-action-1")
)
self.assertEqual(
force_bytes(inst.action[0].action[0].title),
force_bytes("Administer Medication 1"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].id), force_bytes("medication-action-2")
)
self.assertEqual(
force_bytes(inst.action[0].action[1].relatedAction[0].actionId),
force_bytes("medication-action-1"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].relatedAction[0].offsetDuration.unit),
force_bytes("h"),
)
self.assertEqual(
inst.action[0].action[1].relatedAction[0].offsetDuration.value, 1
)
self.assertEqual(
force_bytes(inst.action[0].action[1].relatedAction[0].relationship),
force_bytes("after-end"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].title),
force_bytes("Administer Medication 2"),
)
self.assertEqual(
force_bytes(inst.action[0].groupingBehavior), force_bytes("logical-group")
)
self.assertEqual(
force_bytes(inst.action[0].selectionBehavior), force_bytes("all")
)
self.assertEqual(
force_bytes(inst.contained[0].id),
force_bytes("activitydefinition-medicationrequest-1"),
)
self.assertEqual(
force_bytes(inst.contained[1].id),
force_bytes("activitydefinition-medicationrequest-2"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("options-example"))
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(
force_bytes(inst.text.div),
force_bytes(
'<div xmlns="http://www.w3.org/1999/xhtml">[Put rendering here]</div>'
),
)
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title),
force_bytes("This example illustrates relationships between actions."),
)
def testPlanDefinition3(self):
inst = self.instantiate_from(
"plandefinition-exclusive-breastfeeding-intervention-02.json"
)
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition3(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition3(inst2)
def implPlanDefinition3(self, inst):
self.assertEqual(
force_bytes(inst.action[0].action[0].dynamicValue[0].expression),
force_bytes("Communication Request to Provider"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].dynamicValue[0].path), force_bytes("/")
)
self.assertEqual(
force_bytes(inst.action[0].action[0].textEquivalent),
force_bytes(
"A Breastfeeding Readiness Assessment is recommended, please authorize or reject the order."
),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].title),
force_bytes("Notify the provider to sign the order."),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].type.code), force_bytes("create")
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].expression),
force_bytes("Should Notify Provider to Sign Assessment Order"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].kind), force_bytes("applicability")
)
self.assertEqual(
force_bytes(inst.action[0].title),
force_bytes(
"Mother should be administered a breastfeeding readiness assessment."
),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].eventName),
force_bytes("Admission"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[1].eventName),
force_bytes("Birth"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[1].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[2].eventName),
force_bytes("Infant Transfer to Recovery"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[2].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[3].eventName),
force_bytes("Transfer to Post-Partum"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[3].type),
force_bytes("named-event"),
)
self.assertEqual(inst.date.date, FHIRDate("2015-03-08").date)
self.assertEqual(inst.date.as_json(), "2015-03-08")
self.assertEqual(
force_bytes(inst.id), force_bytes("exclusive-breastfeeding-intervention-02")
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("exclusive-breastfeeding-intervention-02"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title),
force_bytes("Exclusive Breastfeeding Intervention-02"),
)
self.assertEqual(
force_bytes(inst.topic[0].text), force_bytes("Exclusive Breastfeeding")
)
self.assertEqual(force_bytes(inst.version), force_bytes("1.0.0"))
def testPlanDefinition4(self):
inst = self.instantiate_from(
"plandefinition-exclusive-breastfeeding-intervention-03.json"
)
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition4(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition4(inst2)
def implPlanDefinition4(self, inst):
self.assertEqual(
force_bytes(inst.action[0].action[0].dynamicValue[0].expression),
force_bytes("Communication Request to Charge Nurse"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].dynamicValue[0].path), force_bytes("/")
)
self.assertEqual(
force_bytes(inst.action[0].action[0].textEquivalent),
force_bytes(
"A Breastfeeding Readiness Assessment is recommended, please administer the assessment."
),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].title),
force_bytes("Notify the charge nurse to perform the assessment."),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].type.code), force_bytes("create")
)
self.assertEqual(
force_bytes(inst.action[0].action[1].dynamicValue[0].expression),
force_bytes("Communication Request to Bedside Nurse"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].dynamicValue[0].path), force_bytes("/")
)
self.assertEqual(
force_bytes(inst.action[0].action[1].textEquivalent),
force_bytes(
"A Breastfeeding Readiness Assessment is recommended, please administer the assessment."
),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].title),
force_bytes("Notify the bedside nurse to perform the assessment."),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].type.code), force_bytes("create")
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].expression),
force_bytes("Should Notify Nurse to Perform Assessment"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].kind), force_bytes("applicability")
)
self.assertEqual(
force_bytes(inst.action[0].title),
force_bytes(
"Mother should be administered a breastfeeding readiness assessment."
),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].eventName),
force_bytes("Admission"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[1].eventName),
force_bytes("Birth"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[1].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[2].eventName),
force_bytes("Infant Transfer to Recovery"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[2].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[3].eventName),
force_bytes("Transfer to Post-Partum"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[3].type),
force_bytes("named-event"),
)
self.assertEqual(inst.date.date, FHIRDate("2015-03-08").date)
self.assertEqual(inst.date.as_json(), "2015-03-08")
self.assertEqual(
force_bytes(inst.id), force_bytes("exclusive-breastfeeding-intervention-03")
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("exclusive-breastfeeding-intervention-03"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title),
force_bytes("Exclusive Breastfeeding Intervention-03"),
)
self.assertEqual(
force_bytes(inst.topic[0].text), force_bytes("Exclusive Breastfeeding")
)
self.assertEqual(force_bytes(inst.version), force_bytes("1.0.0"))
def testPlanDefinition5(self):
inst = self.instantiate_from("plandefinition-protocol-example.json")
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition5(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition5(inst2)
def implPlanDefinition5(self, inst):
self.assertEqual(
force_bytes(inst.action[0].cardinalityBehavior), force_bytes("single")
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].expression),
force_bytes(
"exists ([Condition: Obesity]) or not exists ([Observation: BMI] O where O.effectiveDateTime 2 years or less before Today())"
),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].kind), force_bytes("applicability")
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].language), force_bytes("text/cql")
)
self.assertEqual(
force_bytes(inst.action[0].goalId[0]), force_bytes("reduce-bmi-ratio")
)
self.assertEqual(force_bytes(inst.action[0].label), force_bytes("Measure BMI"))
self.assertEqual(
force_bytes(inst.action[0].requiredBehavior),
force_bytes("must-unless-documented"),
)
self.assertEqual(
force_bytes(inst.action[0].title),
force_bytes("Measure, Weight, Height, Waist, Circumference; Calculate BMI"),
)
self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("procedure"))
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[0].system),
force_bytes("url"),
)
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[0].value),
force_bytes("https://www.nhlbi.nih.gov/health-pro/guidelines"),
)
self.assertEqual(
force_bytes(inst.contributor[0].name),
force_bytes("National Heart, Lung, and Blood Institute"),
)
self.assertEqual(force_bytes(inst.contributor[0].type), force_bytes("author"))
self.assertEqual(
force_bytes(inst.goal[0].addresses[0].coding[0].code),
force_bytes("414916001"),
)
self.assertEqual(
force_bytes(inst.goal[0].addresses[0].coding[0].display),
force_bytes("Obesity (disorder)"),
)
self.assertEqual(
force_bytes(inst.goal[0].addresses[0].coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.goal[0].category.text), force_bytes("Treatment")
)
self.assertEqual(
force_bytes(inst.goal[0].description.text),
force_bytes("Reduce BMI to below 25"),
)
self.assertEqual(
force_bytes(inst.goal[0].documentation[0].display),
force_bytes("Evaluation and Treatment Strategy"),
)
self.assertEqual(
force_bytes(inst.goal[0].documentation[0].type),
force_bytes("justification"),
)
self.assertEqual(
force_bytes(inst.goal[0].documentation[0].url),
force_bytes(
"https://www.nhlbi.nih.gov/health-pro/guidelines/current/obesity-guidelines/e_textbook/txgd/42.htm"
),
)
self.assertEqual(force_bytes(inst.goal[0].id), force_bytes("reduce-bmi-ratio"))
self.assertEqual(
force_bytes(inst.goal[0].priority.text), force_bytes("medium-priority")
)
self.assertEqual(
force_bytes(inst.goal[0].start.text),
force_bytes("When the patient's BMI Ratio is at or above 25"),
)
self.assertEqual(
force_bytes(inst.goal[0].target[0].detailRange.high.unit),
force_bytes("kg/m2"),
)
self.assertEqual(inst.goal[0].target[0].detailRange.high.value, 24.9)
self.assertEqual(force_bytes(inst.goal[0].target[0].due.unit), force_bytes("a"))
self.assertEqual(inst.goal[0].target[0].due.value, 1)
self.assertEqual(
force_bytes(inst.goal[0].target[0].measure.coding[0].code),
force_bytes("39156-5"),
)
self.assertEqual(
force_bytes(inst.goal[0].target[0].measure.coding[0].display),
force_bytes("Body mass index (BMI) [Ratio]"),
)
self.assertEqual(
force_bytes(inst.goal[0].target[0].measure.coding[0].system),
force_bytes("http://loinc.org"),
)
self.assertEqual(force_bytes(inst.id), force_bytes("protocol-example"))
self.assertEqual(
force_bytes(inst.identifier[0].system), force_bytes("http://acme.org")
)
self.assertEqual(
force_bytes(inst.identifier[0].value), force_bytes("example-1")
)
self.assertEqual(
force_bytes(inst.purpose),
force_bytes(
"Example of A medical algorithm for assessment and treatment of overweight and obesity"
),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].display),
force_bytes("Overweight and Obesity Treatment Guidelines"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].url),
force_bytes(
"http://www.nhlbi.nih.gov/health-pro/guidelines/current/obesity-guidelines/e_textbook/txgd/algorthm/algorthm.htm"
),
)
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title), force_bytes("Obesity Assessment Protocol")
)
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("protocol"))
self.assertEqual(
force_bytes(inst.useContext[0].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].code),
force_bytes("414916001"),
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].display),
force_bytes("Obesity (disorder)"),
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
def testPlanDefinition6(self):
inst = self.instantiate_from("plandefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition6(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition6(inst2)
def implPlanDefinition6(self, inst):
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[0].expression),
force_bytes("Now()"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[0].path),
force_bytes("timing.event"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[1].expression),
force_bytes(
"Code '261QM0850X' from SuicideRiskLogic.\"NUCC Provider Taxonomy\" display 'Adult Mental Health'"
),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[1].path),
force_bytes("specialty"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[2].expression),
force_bytes("SuicideRiskLogic.ReferralRequestFulfillmentTime"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[2].path),
force_bytes("occurrenceDateTime"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[3].expression),
force_bytes("SuicideRiskLogic.Patient"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[3].path),
force_bytes("subject"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[4].expression),
force_bytes("SuicideRiskLogic.Practitioner"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[4].path),
force_bytes("requester.agent"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[5].expression),
force_bytes("SuicideRiskLogic.RiskAssessmentScore"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[5].path),
force_bytes("reasonCode"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[6].expression),
force_bytes("SuicideRiskLogic.RiskAssessment"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].dynamicValue[6].path),
force_bytes("reasonReference"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].action[0].textEquivalent),
force_bytes(
"Refer to outpatient mental health program for evaluation and treatment of mental health conditions now"
),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].groupingBehavior),
force_bytes("logical-group"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].selectionBehavior), force_bytes("any")
)
self.assertEqual(
force_bytes(inst.action[0].action[0].title),
force_bytes("Consults and Referrals"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[0]
.expression
),
force_bytes("'draft'"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[0]
.path
),
force_bytes("status"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[1]
.expression
),
force_bytes("SuicideRiskLogic.Patient"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[1]
.path
),
force_bytes("patient"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[2]
.expression
),
force_bytes("SuicideRiskLogic.Practitioner"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[2]
.path
),
force_bytes("prescriber"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[3]
.expression
),
force_bytes("SuicideRiskLogic.RiskAssessmentScore"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[3]
.path
),
force_bytes("reasonCode"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[4]
.expression
),
force_bytes("SuicideRiskLogic.RiskAssessment"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.action[0]
.dynamicValue[4]
.path
),
force_bytes("reasonReference"),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].action[0].action[0].textEquivalent
),
force_bytes(
"citalopram 20 mg tablet 1 tablet oral 1 time daily now (30 table; 3 refills)"
),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].action[0].action[1].textEquivalent
),
force_bytes(
"escitalopram 10 mg tablet 1 tablet oral 1 time daily now (30 tablet; 3 refills)"
),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].action[0].action[2].textEquivalent
),
force_bytes(
"fluoxetine 20 mg capsule 1 capsule oral 1 time daily now (30 tablet; 3 refills)"
),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].action[0].action[3].textEquivalent
),
force_bytes(
"paroxetine 20 mg tablet 1 tablet oral 1 time daily now (30 tablet; 3 refills)"
),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].action[0].action[4].textEquivalent
),
force_bytes(
"sertraline 50 mg tablet 1 tablet oral 1 time daily now (30 tablet; 3 refills)"
),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.documentation[0]
.document.contentType
),
force_bytes("text/html"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.documentation[0]
.document.title
),
force_bytes(
"National Library of Medicine. DailyMed website. CITALOPRAM- citalopram hydrobromide tablet, film coated."
),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.action[0]
.documentation[0]
.document.url
),
force_bytes(
"http://dailymed.nlm.nih.gov/dailymed/drugInfo.cfm?setid=6daeb45c-451d-b135-bf8f-2d6dff4b6b01"
),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].action[0].documentation[0].type
),
force_bytes("citation"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].action[0].groupingBehavior),
force_bytes("logical-group"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].action[0].selectionBehavior),
force_bytes("at-most-one"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].action[0].title),
force_bytes(
"Selective Serotonin Reuptake Inhibitors (Choose a mazimum of one or document reasons for exception)"
),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].action[1].textEquivalent),
force_bytes(
"Dopamine Norepinephrine Reuptake Inhibitors (Choose a maximum of one or document reasons for exception)"
),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].action[2].textEquivalent),
force_bytes(
"Serotonin Norepinephrine Reuptake Inhibitors (Choose a maximum of one or doument reasons for exception)"
),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].action[3].textEquivalent),
force_bytes(
"Norepinephrine-Serotonin Modulators (Choose a maximum of one or document reasons for exception)"
),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].documentation[0].document.contentType
),
force_bytes("text/html"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.documentation[0]
.document.extension[0]
.url
),
force_bytes(
"http://hl7.org/fhir/StructureDefinition/cqif-qualityOfEvidence"
),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.documentation[0]
.document.extension[0]
.valueCodeableConcept.coding[0]
.code
),
force_bytes("high"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.documentation[0]
.document.extension[0]
.valueCodeableConcept.coding[0]
.system
),
force_bytes("http://hl7.org/fhir/evidence-quality"),
)
self.assertEqual(
force_bytes(
inst.action[0]
.action[1]
.action[0]
.documentation[0]
.document.extension[0]
.valueCodeableConcept.text
),
force_bytes("High Quality"),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].documentation[0].document.title
),
force_bytes(
"Practice Guideline for the Treatment of Patients with Major Depressive Disorder"
),
)
self.assertEqual(
force_bytes(
inst.action[0].action[1].action[0].documentation[0].document.url
),
force_bytes(
"http://psychiatryonline.org/pb/assets/raw/sitewide/practice_guidelines/guidelines/mdd.pdf"
),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].documentation[0].type),
force_bytes("citation"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].groupingBehavior),
force_bytes("logical-group"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].selectionBehavior),
force_bytes("at-most-one"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].action[0].title),
force_bytes("First-Line Antidepressants"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].groupingBehavior),
force_bytes("logical-group"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].selectionBehavior),
force_bytes("at-most-one"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].title), force_bytes("Medications")
)
self.assertEqual(
force_bytes(inst.action[0].title),
force_bytes("Suicide Risk Assessment and Outpatient Management"),
)
self.assertEqual(inst.approvalDate.date, FHIRDate("2016-03-12").date)
self.assertEqual(inst.approvalDate.as_json(), "2016-03-12")
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].system), force_bytes("phone")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].use), force_bytes("work")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[0].value), force_bytes("415-362-4007")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].system), force_bytes("email")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].use), force_bytes("work")
)
self.assertEqual(
force_bytes(inst.contact[0].telecom[1].value),
force_bytes("info@motivemi.com"),
)
self.assertEqual(
force_bytes(inst.contained[0].id), force_bytes("referralToMentalHealthCare")
)
self.assertEqual(
force_bytes(inst.contained[1].id), force_bytes("citalopramPrescription")
)
self.assertEqual(
force_bytes(inst.contained[2].id), force_bytes("citalopramMedication")
)
self.assertEqual(
force_bytes(inst.contained[3].id), force_bytes("citalopramSubstance")
)
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[0].system),
force_bytes("phone"),
)
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[0].use),
force_bytes("work"),
)
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[0].value),
force_bytes("415-362-4007"),
)
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[1].system),
force_bytes("email"),
)
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[1].use),
force_bytes("work"),
)
self.assertEqual(
force_bytes(inst.contributor[0].contact[0].telecom[1].value),
force_bytes("info@motivemi.com"),
)
self.assertEqual(
force_bytes(inst.contributor[0].name),
force_bytes("Motive Medical Intelligence"),
)
self.assertEqual(force_bytes(inst.contributor[0].type), force_bytes("author"))
self.assertEqual(
force_bytes(inst.copyright),
force_bytes(
"© Copyright 2016 Motive Medical Intelligence. All rights reserved."
),
)
self.assertEqual(inst.date.date, FHIRDate("2015-08-15").date)
self.assertEqual(inst.date.as_json(), "2015-08-15")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"Orders to be applied to a patient characterized as low suicide risk."
),
)
self.assertEqual(inst.effectivePeriod.end.date, FHIRDate("2017-12-31").date)
self.assertEqual(inst.effectivePeriod.end.as_json(), "2017-12-31")
self.assertEqual(inst.effectivePeriod.start.date, FHIRDate("2016-01-01").date)
self.assertEqual(inst.effectivePeriod.start.as_json(), "2016-01-01")
self.assertTrue(inst.experimental)
self.assertEqual(
force_bytes(inst.id), force_bytes("low-suicide-risk-order-set")
)
self.assertEqual(
force_bytes(inst.identifier[0].system),
force_bytes("http://motivemi.com/artifacts"),
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("mmi:low-suicide-risk-order-set"),
)
self.assertEqual(
force_bytes(inst.jurisdiction[0].coding[0].code), force_bytes("US")
)
self.assertEqual(
force_bytes(inst.jurisdiction[0].coding[0].system),
force_bytes("urn:iso:std:iso:3166"),
)
self.assertEqual(inst.lastReviewDate.date, FHIRDate("2016-08-15").date)
self.assertEqual(inst.lastReviewDate.as_json(), "2016-08-15")
self.assertEqual(force_bytes(inst.name), force_bytes("LowSuicideRiskOrderSet"))
self.assertEqual(
force_bytes(inst.publisher), force_bytes("Motive Medical Intelligence")
)
self.assertEqual(
force_bytes(inst.purpose),
force_bytes(
"This order set helps ensure consistent application of appropriate orders for the care of low suicide risk patients."
),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].display),
force_bytes(
"Practice Guideline for the Treatment of Patients with Major Depressive Disorder"
),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].url),
force_bytes(
"http://psychiatryonline.org/pb/assets/raw/sitewide/practice_guidelines/guidelines/mdd.pdf"
),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[1].type), force_bytes("composed-of")
)
self.assertEqual(
force_bytes(inst.relatedArtifact[2].type), force_bytes("composed-of")
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title), force_bytes("Low Suicide Risk Order Set")
)
self.assertEqual(
force_bytes(inst.topic[0].text), force_bytes("Suicide risk assessment")
)
self.assertEqual(
force_bytes(inst.url),
force_bytes(
"http://motivemi.com/artifacts/PlanDefinition/low-suicide-risk-order-set"
),
)
self.assertEqual(
force_bytes(inst.usage),
force_bytes(
"This order set should be applied after assessing a patient for suicide risk, when the findings of that assessment indicate the patient has low suicide risk."
),
)
self.assertEqual(force_bytes(inst.useContext[0].code.code), force_bytes("age"))
self.assertEqual(
force_bytes(inst.useContext[0].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].code),
force_bytes("D000328"),
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].display),
force_bytes("Adult"),
)
self.assertEqual(
force_bytes(inst.useContext[0].valueCodeableConcept.coding[0].system),
force_bytes("https://meshb.nlm.nih.gov"),
)
self.assertEqual(
force_bytes(inst.useContext[1].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[1].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[1].valueCodeableConcept.coding[0].code),
force_bytes("87512008"),
)
self.assertEqual(
force_bytes(inst.useContext[1].valueCodeableConcept.coding[0].display),
force_bytes("Mild major depression"),
)
self.assertEqual(
force_bytes(inst.useContext[1].valueCodeableConcept.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.useContext[2].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[2].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[2].valueCodeableConcept.coding[0].code),
force_bytes("40379007"),
)
self.assertEqual(
force_bytes(inst.useContext[2].valueCodeableConcept.coding[0].display),
force_bytes("Major depression, recurrent, mild"),
)
self.assertEqual(
force_bytes(inst.useContext[2].valueCodeableConcept.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.useContext[3].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[3].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[3].valueCodeableConcept.coding[0].code),
force_bytes("394687007"),
)
self.assertEqual(
force_bytes(inst.useContext[3].valueCodeableConcept.coding[0].display),
force_bytes("Low suicide risk"),
)
self.assertEqual(
force_bytes(inst.useContext[3].valueCodeableConcept.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.useContext[4].code.code), force_bytes("focus")
)
self.assertEqual(
force_bytes(inst.useContext[4].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[4].valueCodeableConcept.coding[0].code),
force_bytes("225337009"),
)
self.assertEqual(
force_bytes(inst.useContext[4].valueCodeableConcept.coding[0].display),
force_bytes("Suicide risk assessment"),
)
self.assertEqual(
force_bytes(inst.useContext[4].valueCodeableConcept.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.useContext[5].code.code), force_bytes("user"))
self.assertEqual(
force_bytes(inst.useContext[5].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[5].valueCodeableConcept.coding[0].code),
force_bytes("309343006"),
)
self.assertEqual(
force_bytes(inst.useContext[5].valueCodeableConcept.coding[0].display),
force_bytes("Physician"),
)
self.assertEqual(
force_bytes(inst.useContext[5].valueCodeableConcept.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(
force_bytes(inst.useContext[6].code.code), force_bytes("venue")
)
self.assertEqual(
force_bytes(inst.useContext[6].code.system),
force_bytes("http://hl7.org/fhir/usage-context-type"),
)
self.assertEqual(
force_bytes(inst.useContext[6].valueCodeableConcept.coding[0].code),
force_bytes("440655000"),
)
self.assertEqual(
force_bytes(inst.useContext[6].valueCodeableConcept.coding[0].display),
force_bytes("Outpatient environment"),
)
self.assertEqual(
force_bytes(inst.useContext[6].valueCodeableConcept.coding[0].system),
force_bytes("http://snomed.info/sct"),
)
self.assertEqual(force_bytes(inst.version), force_bytes("1.0.0"))
def testPlanDefinition7(self):
inst = self.instantiate_from(
"plandefinition-exclusive-breastfeeding-intervention-04.json"
)
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition7(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition7(inst2)
def implPlanDefinition7(self, inst):
self.assertEqual(
force_bytes(inst.action[0].action[0].dynamicValue[0].expression),
force_bytes("Create Lactation Consult Request"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].dynamicValue[0].path), force_bytes("/")
)
self.assertEqual(
force_bytes(inst.action[0].action[0].textEquivalent),
force_bytes("Create a lactation consult request"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].title),
force_bytes("Create a lactation consult request."),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].type.code), force_bytes("create")
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].expression),
force_bytes("Should Create Lactation Consult"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].kind), force_bytes("applicability")
)
self.assertEqual(
force_bytes(inst.action[0].title),
force_bytes(
"Mother should be referred to a lactation specialist for consultation."
),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].eventName),
force_bytes("Admission"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[1].eventName),
force_bytes("Birth"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[1].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[2].eventName),
force_bytes("Infant Transfer to Recovery"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[2].type),
force_bytes("named-event"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[3].eventName),
force_bytes("Transfer to Post-Partum"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[3].type),
force_bytes("named-event"),
)
self.assertEqual(inst.date.date, FHIRDate("2015-03-08").date)
self.assertEqual(inst.date.as_json(), "2015-03-08")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"Exclusive breastfeeding intervention intended to improve outcomes for exclusive breastmilk feeding of newborns by creating a lactation consult for the mother if appropriate."
),
)
self.assertEqual(
force_bytes(inst.id), force_bytes("exclusive-breastfeeding-intervention-04")
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("exclusive-breastfeeding-intervention-04"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title),
force_bytes("Exclusive Breastfeeding Intervention-04"),
)
self.assertEqual(
force_bytes(inst.topic[0].text), force_bytes("Exclusive Breastfeeding")
)
self.assertEqual(force_bytes(inst.version), force_bytes("1.0.0"))
def testPlanDefinition8(self):
inst = self.instantiate_from("plandefinition-predecessor-example.json")
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition8(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition8(inst2)
def implPlanDefinition8(self, inst):
self.assertEqual(
force_bytes(inst.action[0].action[0].condition[0].expression),
force_bytes("Should Administer Zika Virus Exposure Assessment"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].condition[0].expression),
force_bytes("Should Order Serum + Urine rRT-PCR Test"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[2].condition[0].expression),
force_bytes("Should Order Serum Zika Virus IgM + Dengue Virus IgM"),
)
self.assertEqual(
force_bytes(inst.action[0].action[2].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[3].condition[0].expression),
force_bytes("Should Consider IgM Antibody Testing"),
)
self.assertEqual(
force_bytes(inst.action[0].action[3].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[4].condition[0].expression),
force_bytes("Should Provide Mosquito Prevention and Contraception Advice"),
)
self.assertEqual(
force_bytes(inst.action[0].action[4].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].expression),
force_bytes("Is Patient Pregnant"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].kind), force_bytes("applicability")
)
self.assertEqual(
force_bytes(inst.action[0].title), force_bytes("Zika Virus Assessment")
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].eventName),
force_bytes("patient-view"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].type),
force_bytes("named-event"),
)
self.assertEqual(inst.date.date, FHIRDate("2016-11-14").date)
self.assertEqual(inst.date.as_json(), "2016-11-14")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"Zika Virus Management intervention describing the CDC Guidelines for Zika Virus Reporting and Management."
),
)
self.assertEqual(
force_bytes(inst.id), force_bytes("zika-virus-intervention-initial")
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("zika-virus-intervention"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].url),
force_bytes(
"https://www.cdc.gov/mmwr/volumes/65/wr/mm6539e1.htm?s_cid=mm6539e1_w"
),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[1].type), force_bytes("successor")
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title), force_bytes("Example Zika Virus Intervention")
)
self.assertEqual(
force_bytes(inst.topic[0].text), force_bytes("Zika Virus Management")
)
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://example.org/PlanDefinition/zika-virus-intervention"),
)
self.assertEqual(force_bytes(inst.version), force_bytes("1.0.0"))
def testPlanDefinition9(self):
inst = self.instantiate_from("plandefinition-zika-virus-intervention.json")
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition9(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition9(inst2)
def implPlanDefinition9(self, inst):
self.assertEqual(
force_bytes(inst.action[0].action[0].condition[0].expression),
force_bytes("Should Administer Zika Virus Exposure Assessment"),
)
self.assertEqual(
force_bytes(inst.action[0].action[0].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].condition[0].expression),
force_bytes("Should Order Serum + Urine rRT-PCR Test"),
)
self.assertEqual(
force_bytes(inst.action[0].action[1].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[2].condition[0].expression),
force_bytes("Should Order Serum Zika Virus IgM + Dengue Virus IgM"),
)
self.assertEqual(
force_bytes(inst.action[0].action[2].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[3].condition[0].expression),
force_bytes("Should Consider IgM Antibody Testing"),
)
self.assertEqual(
force_bytes(inst.action[0].action[3].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].action[4].condition[0].expression),
force_bytes("Should Provide Mosquito Prevention and Contraception Advice"),
)
self.assertEqual(
force_bytes(inst.action[0].action[4].condition[0].kind),
force_bytes("applicability"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].expression),
force_bytes("Is Patient Pregnant"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].kind), force_bytes("applicability")
)
self.assertEqual(
force_bytes(inst.action[0].title), force_bytes("Zika Virus Assessment")
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].eventName),
force_bytes("patient-view"),
)
self.assertEqual(
force_bytes(inst.action[0].triggerDefinition[0].type),
force_bytes("named-event"),
)
self.assertEqual(inst.date.date, FHIRDate("2017-01-12").date)
self.assertEqual(inst.date.as_json(), "2017-01-12")
self.assertEqual(
force_bytes(inst.description),
force_bytes(
"Zika Virus Management intervention describing the CDC Guidelines for Zika Virus Reporting and Management."
),
)
self.assertEqual(force_bytes(inst.id), force_bytes("zika-virus-intervention"))
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("zika-virus-intervention"),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].type), force_bytes("derived-from")
)
self.assertEqual(
force_bytes(inst.relatedArtifact[0].url),
force_bytes(
"https://www.cdc.gov/mmwr/volumes/65/wr/mm6539e1.htm?s_cid=mm6539e1_w"
),
)
self.assertEqual(
force_bytes(inst.relatedArtifact[1].type), force_bytes("predecessor")
)
self.assertEqual(force_bytes(inst.status), force_bytes("active"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(
force_bytes(inst.title), force_bytes("Example Zika Virus Intervention")
)
self.assertEqual(
force_bytes(inst.topic[0].text), force_bytes("Zika Virus Management")
)
self.assertEqual(
force_bytes(inst.url),
force_bytes("http://example.org/PlanDefinition/zika-virus-intervention"),
)
self.assertEqual(force_bytes(inst.version), force_bytes("2.0.0"))
def testPlanDefinition10(self):
inst = self.instantiate_from(
"plandefinition-chlamydia-screening-intervention.json"
)
self.assertIsNotNone(inst, "Must have instantiated a PlanDefinition instance")
self.implPlanDefinition10(inst)
js = inst.as_json()
self.assertEqual("PlanDefinition", js["resourceType"])
inst2 = plandefinition.PlanDefinition(js)
self.implPlanDefinition10(inst2)
def implPlanDefinition10(self, inst):
self.assertEqual(
force_bytes(inst.action[0].condition[0].expression),
force_bytes("NoScreening"),
)
self.assertEqual(
force_bytes(inst.action[0].condition[0].kind), force_bytes("applicability")
)
self.assertEqual(
force_bytes(inst.action[0].dynamicValue[0].expression),
force_bytes("ChlamydiaScreeningRequest"),
)
self.assertEqual(
force_bytes(inst.action[0].dynamicValue[0].path), force_bytes("~")
)
self.assertEqual(
force_bytes(inst.action[0].title),
force_bytes(
"Patient has not had chlamydia screening within the recommended timeframe..."
),
)
self.assertEqual(inst.date.date, FHIRDate("2015-07-22").date)
self.assertEqual(inst.date.as_json(), "2015-07-22")
self.assertEqual(
force_bytes(inst.description),
force_bytes("Chlamydia Screening CDS Example Using Common"),
)
self.assertEqual(
force_bytes(inst.id), force_bytes("chlamydia-screening-intervention")
)
self.assertEqual(force_bytes(inst.identifier[0].use), force_bytes("official"))
self.assertEqual(
force_bytes(inst.identifier[0].value),
force_bytes("ChlamydiaScreening_CDS_UsingCommon"),
)
self.assertEqual(force_bytes(inst.status), force_bytes("draft"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(
            force_bytes(inst.title),
            # "Chalmydia" reproduces the misspelling present in the example JSON
            force_bytes("Chalmydia Screening CDS Example Using Common"),
        )
        self.assertEqual(
            # "Screeening" likewise matches the example's own spelling
            force_bytes(inst.topic[0].text), force_bytes("Chlamydia Screeening")
        )
self.assertEqual(force_bytes(inst.version), force_bytes("2.0.0"))
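Each implPlanDefinitionN helper above is deliberately run twice per test: once on the instance parsed from the example JSON, and once on a second instance rebuilt from the first one's as_json() output, so every field assertion doubles as a serialization round-trip check. The pattern, reduced to its core (the fixture name here is a hypothetical placeholder):

    inst = self.instantiate_from("plandefinition-example.json")  # hypothetical fixture name
    self.implPlanDefinitionN(inst)             # assert fields on the parsed instance
    js = inst.as_json()                        # serialize back to a plain dict
    inst2 = plandefinition.PlanDefinition(js)  # rebuild from the serialized form
    self.implPlanDefinitionN(inst2)            # the same assertions must still hold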
| 38.78512
| 191
| 0.576041
| 7,510
| 74,545
| 5.601598
| 0.08229
| 0.196586
| 0.196349
| 0.245436
| 0.883783
| 0.86857
| 0.849054
| 0.826329
| 0.803009
| 0.773201
| 0
| 0.028669
| 0.301871
| 74,545
| 1,921
| 192
| 38.80531
| 0.77964
| 0.002294
| 0
| 0.576145
| 0
| 0.005857
| 0.166716
| 0.020735
| 0
| 0
| 0
| 0
| 0.250266
| 1
| 0.011182
| false
| 0
| 0.00426
| 0
| 0.016507
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
09f6b804d27a26cc465a31702f0cb60abd7a7de6
| 80
|
py
|
Python
|
Chapter02/checkargs.py
|
Mehdi-Soltanmoradi/Python-Network-Programming
|
984bc9cbb0d42a86ec2076d72a6b9ef82fd3ada0
|
[
"MIT"
] | 36
|
2019-01-28T07:19:09.000Z
|
2022-01-13T04:44:38.000Z
|
Chapter02/checkargs.py
|
larisk8ter/Practical-Network-Automation
|
fa0e7e81869162fe578cf85166fdccca2acdd418
|
[
"MIT"
] | null | null | null |
Chapter02/checkargs.py
|
larisk8ter/Practical-Network-Automation
|
fa0e7e81869162fe578cf85166fdccca2acdd418
|
[
"MIT"
] | 37
|
2019-01-26T09:50:19.000Z
|
2022-02-28T22:16:36.000Z
|
import sys

if len(sys.argv) != 3:  # guard against missing/extra arguments
    sys.exit("Usage: checkargs.py <num1> <num2>")
print("Total output is")
print(int(sys.argv[1]) + int(sys.argv[2]))
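A hypothetical invocation, assuming the script above is saved as checkargs.py:

    $ python checkargs.py 2 3
    Total output is
    5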
| 20
| 41
| 0.675
| 15
| 80
| 3.6
| 0.666667
| 0.222222
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 0.1125
| 80
| 3
| 42
| 26.666667
| 0.732394
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
09fe489efa28b32f1c8a145b061bded7329ea938
| 783
|
py
|
Python
|
tests/integration/test_sec.py
|
mhadam/holdingsparser
|
c2f58acf6414e417ff435bb01ecbb8c37be50d77
|
[
"MIT"
] | 9
|
2018-04-23T23:35:21.000Z
|
2021-11-03T04:34:09.000Z
|
tests/integration/test_sec.py
|
mhadam/holdingsparser
|
c2f58acf6414e417ff435bb01ecbb8c37be50d77
|
[
"MIT"
] | 2
|
2021-05-06T14:38:14.000Z
|
2021-05-08T01:17:33.000Z
|
tests/integration/test_sec.py
|
mhadam/holdingsparser
|
c2f58acf6414e417ff435bb01ecbb8c37be50d77
|
[
"MIT"
] | 4
|
2018-08-18T18:09:06.000Z
|
2021-06-21T02:02:07.000Z
|
from holdingsparser.sec import get_holdings_document_url
def test_get_holdings_document_url_new():
documents_url = "https://www.sec.gov/Archives/edgar/data/1166559/000110465921021959/0001104659-21-021959-index.htm"
result = get_holdings_document_url(documents_url)
expected = "https://www.sec.gov/Archives/edgar/data/1166559/000110465921021959/a21-6498_1informationtable.xml"
assert result == expected
# Note: byte-for-byte identical to test_get_holdings_document_url_new above.
def test_get_holdings_document_url():
documents_url = "https://www.sec.gov/Archives/edgar/data/1166559/000110465921021959/0001104659-21-021959-index.htm"
result = get_holdings_document_url(documents_url)
expected = "https://www.sec.gov/Archives/edgar/data/1166559/000110465921021959/a21-6498_1informationtable.xml"
assert result == expected
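Since the two tests above are identical, a pytest.mark.parametrize sketch (hypothetical, not in the original file) would collapse them and make adding further URL pairs trivial:

    import pytest
    from holdingsparser.sec import get_holdings_document_url

    @pytest.mark.parametrize(
        "documents_url, expected",
        [
            (
                "https://www.sec.gov/Archives/edgar/data/1166559/000110465921021959/0001104659-21-021959-index.htm",
                "https://www.sec.gov/Archives/edgar/data/1166559/000110465921021959/a21-6498_1informationtable.xml",
            ),
        ],
    )
    def test_get_holdings_document_url_param(documents_url, expected):
        assert get_holdings_document_url(documents_url) == expected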
| 39.15
| 119
| 0.796935
| 101
| 783
| 5.940594
| 0.316832
| 0.091667
| 0.158333
| 0.183333
| 0.913333
| 0.913333
| 0.816667
| 0.816667
| 0.816667
| 0.816667
| 0
| 0.211566
| 0.094508
| 783
| 19
| 120
| 41.210526
| 0.634697
| 0
| 0
| 0.727273
| 0
| 0.363636
| 0.49553
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
61ce356895da46963f90bea4da737f5968938b7b
| 1,802
|
py
|
Python
|
dpm/criterion/adversarial_loss/gan_loss.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | 1
|
2021-07-20T14:02:55.000Z
|
2021-07-20T14:02:55.000Z
|
dpm/criterion/adversarial_loss/gan_loss.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | null | null | null |
dpm/criterion/adversarial_loss/gan_loss.py
|
nextBillyonair/DPM
|
840ffaafe15c208b200b74094ffa8fe493b4c975
|
[
"MIT"
] | null | null | null |
from .adversarial_loss import AdversarialLoss
from torch.nn import BCEWithLogitsLoss, MSELoss
import torch
# Also known as NSGAN (the non-saturating GAN loss)
class GANLoss(AdversarialLoss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bce_loss = BCEWithLogitsLoss()
def discriminator_loss(self, p_values, q_values):
p_loss = self.bce_loss(p_values, torch.ones_like(p_values))
q_loss = self.bce_loss(q_values, torch.zeros_like(q_values))
return p_loss + q_loss
def generator_loss(self, q_values):
return self.bce_loss(q_values, torch.ones_like(q_values))
class MMGANLoss(AdversarialLoss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bce_loss = BCEWithLogitsLoss()
def discriminator_loss(self, p_values, q_values):
p_loss = self.bce_loss(p_values, torch.ones_like(p_values))
q_loss = self.bce_loss(q_values, torch.zeros_like(q_values))
return p_loss + q_loss
def generator_loss(self, q_values):
return -self.bce_loss(q_values, torch.zeros_like(q_values))
class WGANLoss(AdversarialLoss):
    # Sign note: with p_values = D(real) and q_values = D(fake), the usual WGAN
    # critic loss to *minimize* is q_values.mean() - p_values.mean(); the signs
    # here are flipped, so the surrounding training loop presumably negates or
    # maximizes these objectives.
    def discriminator_loss(self, p_values, q_values):
        return p_values.mean() - q_values.mean()
    def generator_loss(self, q_values):
        return q_values.mean()
class LSGANLoss(AdversarialLoss):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mse_loss = MSELoss()
def discriminator_loss(self, p_values, q_values):
p_loss = self.mse_loss(p_values, torch.ones_like(p_values))
q_loss = self.mse_loss(q_values, torch.zeros_like(q_values))
return p_loss + q_loss
def generator_loss(self, q_values):
return self.mse_loss(q_values, torch.ones_like(q_values))
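The only functional difference between GANLoss and MMGANLoss above is the generator objective. A quick self-contained check (not part of this module) of the two identities involved:

    import torch
    from torch.nn import BCEWithLogitsLoss

    bce = BCEWithLogitsLoss()
    x = torch.randn(4)
    # BCE(x, 1) = mean(-log sigmoid(x)): the non-saturating generator loss (GANLoss)
    assert torch.allclose(bce(x, torch.ones_like(x)), (-torch.log(torch.sigmoid(x))).mean())
    # -BCE(x, 0) = mean(log(1 - sigmoid(x))): the minimax generator loss (MMGANLoss)
    assert torch.allclose(-bce(x, torch.zeros_like(x)), torch.log(1 - torch.sigmoid(x)).mean())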
| 31.068966
| 68
| 0.695893
| 253
| 1,802
| 4.565217
| 0.134387
| 0.133333
| 0.07619
| 0.083117
| 0.805195
| 0.805195
| 0.8
| 0.771429
| 0.698701
| 0.698701
| 0
| 0
| 0.194229
| 1,802
| 57
| 69
| 31.614035
| 0.795455
| 0.005549
| 0
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.289474
| false
| 0
| 0.078947
| 0.131579
| 0.684211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
feec0ac10deeb4539d48ca91a99bef04691e5437
| 1,376
|
py
|
Python
|
ika_web/app/test/test_endpoints.py
|
Harisonm/ika
|
243ceab532007ee4fb05b205e1125fab5d3d325b
|
[
"Apache-2.0"
] | 6
|
2020-11-17T19:41:26.000Z
|
2021-01-13T14:55:56.000Z
|
ika_web/app/test/test_endpoints.py
|
Harisonm/ika
|
243ceab532007ee4fb05b205e1125fab5d3d325b
|
[
"Apache-2.0"
] | 3
|
2020-11-16T19:51:17.000Z
|
2020-11-16T19:51:36.000Z
|
ika_web/app/test/test_endpoints.py
|
Harisonm/Ika
|
243ceab532007ee4fb05b205e1125fab5d3d325b
|
[
"Apache-2.0"
] | null | null | null |
# import os
# import requests
# def test_credentials_test(api_v1_host):
# endpoint = os.path.join(api_v1_host, 'credentials', 'test')
# response = requests.get(endpoint)
# assert response.status_code == 200
# json = response.json()
# assert 'msg' in json
# assert json['msg'] == "I'm the test endpoint from credentials."
# def test_blueprint_y_test(api_v1_host):
# endpoint = os.path.join(api_v1_host, 'path_for_blueprint_y', 'test')
# response = requests.get(endpoint)
# assert response.status_code == 200
# json = response.json()
# assert 'msg' in json
# assert json['msg'] == "I'm the test endpoint from blueprint_y."
# def test_blueprint_x_plus(api_v1_host):
# endpoint = os.path.join(api_v1_host, 'path_for_blueprint_x', 'plus')
# payload = {'number': 5}
# response = requests.post(endpoint, json=payload)
# assert response.status_code == 200
# json = response.json()
# assert 'msg' in json
# assert json['msg'] == "Your result is: '10'"
# def test_blueprint_x_minus(api_v1_host):
# endpoint = os.path.join(api_v1_host, 'path_for_blueprint_y', 'minus')
# payload = {'number': 1000}
# response = requests.post(endpoint, json=payload)
# assert response.status_code == 200
# json = response.json()
# assert 'msg' in json
# assert json['msg'] == "Your result is: '0'"
| 38.222222
| 75
| 0.66061
| 189
| 1,376
| 4.592593
| 0.216931
| 0.046083
| 0.082949
| 0.078341
| 0.797235
| 0.797235
| 0.797235
| 0.797235
| 0.797235
| 0.797235
| 0
| 0.025294
| 0.195494
| 1,376
| 36
| 76
| 38.222222
| 0.758808
| 0.950581
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
28bf7455f1f00cf8a2e2c5779a408dca3b607561
| 171
|
py
|
Python
|
katas/kyu_7/all_unique.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/kyu_7/all_unique.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/kyu_7/all_unique.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
from collections import Counter
def has_unique_chars(s):
return Counter(s).most_common(1)[0][1] == 1
# def has_unique_chars(s):
# return len(s) == len(set(s))
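The two variants differ on one edge case: for the empty string, Counter("").most_common(1) is an empty list, so the active version raises IndexError, while the commented-out len(s) == len(set(s)) version returns True. A quick illustration of the common cases:

    assert has_unique_chars("abcdef")
    assert not has_unique_chars("aabbcc")
    # has_unique_chars("") raises IndexError with the Counter-based version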
| 17.1
| 47
| 0.666667
| 29
| 171
| 3.758621
| 0.551724
| 0.110092
| 0.220183
| 0.311927
| 0.440367
| 0.440367
| 0
| 0
| 0
| 0
| 0
| 0.028369
| 0.175439
| 171
| 9
| 48
| 19
| 0.744681
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 7
|
28e94a85f2ee42d44587a1a08b3845f9045fb7b2
| 3,249
|
py
|
Python
|
CursoemVideoPython/Desafio 33.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
CursoemVideoPython/Desafio 33.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
CursoemVideoPython/Desafio 33.py
|
Beebruna/Python
|
bdbe10ea76acca1b417f5960db0aae8be44e0af3
|
[
"MIT"
] | null | null | null |
'''
Write a program that reads three numbers and shows which is the largest and which is the smallest.
'''
num1 = int(input('Enter the first number: '))
num2 = int(input('Enter the second number: '))
num3 = int(input('Enter the third number: '))
'''
if num1 == num2 and num1 == num3 and num2 == num3:
    print('\nThe numbers are all equal!')
else:
    if num1 == num2 and num1 > num3:
        print(f'\nThe number {num1} is the largest of all!')
        print(f'The number {num3} is the smallest of all!')
    else:
        if num1 == num2 and num1 < num3:
            print(f'\nThe number {num3} is the largest of all!')
            print(f'The number {num1} is the smallest of all!')
        else:
            if num1 == num3 and num1 > num2:
                print(f'\nThe number {num1} is the largest of all!')
                print(f'The number {num2} is the smallest of all!')
            else:
                if num1 == num3 and num1 < num2:
                    print(f'\nThe number {num2} is the largest of all!')
                    print(f'The number {num1} is the smallest of all!')
                else:
                    if num2 == num3 and num2 > num1:
                        print(f'\nThe number {num2} is the largest of all!')
                        print(f'The number {num1} is the smallest of all!')
                    else:
                        if num2 == num3 and num2 < num1:
                            print(f'\nThe number {num1} is the largest of all!')
                            print(f'The number {num2} is the smallest of all!')
                        else:
                            if num1 > num2 and num1 > num3:
                                print(f'\nThe number {num1} is the largest of all!')
                                if num2 < num3:
                                    print(f'The number {num2} is the smallest of all!')
                                else:
                                    print(f'The number {num3} is the smallest of all!')
                            else:
                                if num2 > num1 and num2 > num3:
                                    print(f'\nThe number {num2} is the largest of all!')
                                    if num1 < num3:
                                        print(f'The number {num1} is the smallest of all!')
                                    else:
                                        print(f'The number {num3} is the smallest of all!')
                                else:
                                    print(f'\nThe number {num3} is the largest of all!')
                                    if num1 < num2:
                                        print(f'The number {num1} is the smallest of all!')
                                    else:
                                        print(f'The number {num2} is the smallest of all!')
'''
# Correction
if num1 == num2 and num1 == num3 and num2 == num3:
    print('\nThe numbers are all equal!')
else:
    menor = num1
    if num2 < num1 and num2 < num3:
        menor = num2
    else:
        if num3 < num1 and num3 < num2:
            menor = num3
    maior = num1
    if num2 > num1 and num2 > num3:
        maior = num2
    else:
        if num3 > num1 and num3 > num2:
            maior = num3
    print(f'\n{menor} is the smallest number.')
    print(f'{maior} is the largest number.')
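For reference, the corrected block above can be written more idiomatically with built-ins; a sketch (not part of the original exercise):

    numbers = [num1, num2, num3]
    if len(set(numbers)) == 1:
        print('\nThe numbers are all equal!')
    else:
        print(f'\n{min(numbers)} is the smallest number.')
        print(f'{max(numbers)} is the largest number.')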
| 39.621951
| 85
| 0.429363
| 403
| 3,249
| 3.461538
| 0.091811
| 0.035842
| 0.070251
| 0.111828
| 0.81147
| 0.805735
| 0.793548
| 0.749104
| 0.70466
| 0.70466
| 0
| 0.056504
| 0.47707
| 3,249
| 81
| 86
| 40.111111
| 0.764567
| 0.027393
| 0
| 0.15
| 0
| 0
| 0.251185
| 0
| 0
| 0
| 0
| 0.271605
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.15
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e9361a6466e1f10368eca262f0fbe84a0a13ac8e
| 18,425
|
py
|
Python
|
train_semisupervised/models.py
|
zdx3578/self-driving-truck
|
0d6870ea8d00eb5daa89deee2ce0b8fe4d04783b
|
[
"MIT"
] | 373
|
2017-06-02T22:32:15.000Z
|
2022-03-27T12:23:03.000Z
|
train_semisupervised/models.py
|
zdx3578/self-driving-truck
|
0d6870ea8d00eb5daa89deee2ce0b8fe4d04783b
|
[
"MIT"
] | 10
|
2017-07-19T12:18:30.000Z
|
2020-10-07T23:08:58.000Z
|
train_semisupervised/models.py
|
Bill-ME/self-driving-truck
|
0d6870ea8d00eb5daa89deee2ce0b8fe4d04783b
|
[
"MIT"
] | 106
|
2017-06-08T05:12:58.000Z
|
2022-03-30T12:37:01.000Z
|
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import train
from lib import actions
from lib.util import to_cuda, to_variable
from config import Config
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd.function import InplaceFunction
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import imgaug as ia
import random
import math
import cv2
class Predictor(nn.Module):
def __init__(self):
super(Predictor, self).__init__()
def identity(_):
return lambda x: x
#bn2d = nn.BatchNorm2d
#bn1d = nn.BatchNorm1d
bn2d = nn.InstanceNorm2d
bn1d = nn.InstanceNorm1d
#bn2d = identity
#bn1d = identity
#bn2d = InstanceNormalization
self.nb_previous_images = 2
self.emb_c1_curr = nn.Conv2d(3, 128, kernel_size=7, padding=3, stride=2)
self.emb_c1_bn_curr = bn2d(128)
self.emb_c1_sd_curr = nn.Dropout2d(0.0)
self.emb_c2_curr = nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1)
self.emb_c2_bn_curr = bn2d(128)
self.emb_c2_sd_curr = nn.Dropout2d(0.0)
self.emb_c3_curr = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1)
self.emb_c3_bn_curr = bn2d(256)
self.emb_c3_sd_curr = nn.Dropout2d(0.0)
self.emb_c1_prev = nn.Conv2d(self.nb_previous_images, 64, kernel_size=3, padding=1, stride=1)
self.emb_c1_bn_prev = bn2d(64)
self.emb_c1_sd_prev = nn.Dropout2d(0.0)
self.emb_c2_prev = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1)
self.emb_c2_bn_prev = bn2d(128)
self.emb_c2_sd_prev = nn.Dropout2d(0.0)
self.emb_c4 = nn.Conv2d(256+128+4, 256, kernel_size=5, padding=2, stride=2)
self.emb_c4_bn = bn2d(256)
self.emb_c4_sd = nn.Dropout2d(0.0)
self.emb_c5 = nn.Conv2d(256, 256, kernel_size=5, padding=2, stride=2)
self.emb_c5_bn = bn2d(256)
self.emb_c5_sd = nn.Dropout2d(0.0)
self.emb_c6 = nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2)
self.emb_c6_bn = bn2d(512)
self.emb_c6_sd = nn.Dropout2d(0.0)
self.emb_c7 = nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1)
self.emb_c7_bn = bn2d(512)
self.emb_c7_sd = nn.Dropout2d(0.0)
self.maps_c1 = nn.Conv2d(512, 256, kernel_size=5, padding=2)
self.maps_c1_bn = bn2d(256)
self.maps_c2 = nn.Conv2d(256, 256, kernel_size=5, padding=(0, 2))
self.maps_c2_bn = bn2d(256)
self.maps_c3 = nn.Conv2d(256, 8+3+self.nb_previous_images+1+1, kernel_size=5, padding=2) # 8 grids, 3 for RGB AE, N prev for N grayscale AE, 1 flow, 1 canny
# road_type: 10
# intersection: 7
# direction: 3
# lane count: 5
# curve: 8
# space-front: 4
# space-left: 4
# space-right: 4
# offroad: 3
atts_size = 10 + 7 + 3 + 5 + 8 + 4 + 4 + 4 + 3
ma_size = 9 + 9 + 9 + 9
flipped_size = self.nb_previous_images
self.vec_fc1 = nn.Linear(512*3*5, atts_size+ma_size+flipped_size, bias=False)
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv') != -1:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif classname.find('Linear') != -1:
m.weight.data.normal_(0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
#m.weight.data.normal_(1.0, 0.02)
#m.bias.data.fill_(0)
def downscale(self, img):
return ia.imresize_single_image(img, (train.MODEL_HEIGHT, train.MODEL_WIDTH), interpolation="cubic")
def downscale_prev(self, img):
return ia.imresize_single_image(img, (train.MODEL_PREV_HEIGHT, train.MODEL_PREV_WIDTH), interpolation="cubic")
def embed_state(self, previous_states, state, volatile=False, requires_grad=True, gpu=-1):
prev_scrs = [self.downscale_prev(s.screenshot_rs) for s in previous_states]
prev_scrs_y = [cv2.cvtColor(scr, cv2.COLOR_RGB2GRAY) for scr in prev_scrs]
#inputs = np.dstack([self.downscale(state.screenshot_rs)] + list(reversed(prev_scrs_y)))
inputs = np.array(self.downscale(state.screenshot_rs), dtype=np.float32)
inputs = inputs / 255.0
inputs = inputs.transpose((2, 0, 1))
inputs = inputs[np.newaxis, ...]
inputs = to_cuda(to_variable(inputs, volatile=volatile, requires_grad=requires_grad), gpu)
inputs_prev = np.dstack(prev_scrs_y)
inputs_prev = inputs_prev.astype(np.float32) / 255.0
inputs_prev = inputs_prev.transpose((2, 0, 1))
inputs_prev = inputs_prev[np.newaxis, ...]
inputs_prev = to_cuda(to_variable(inputs_prev, volatile=volatile, requires_grad=requires_grad), gpu)
return self.embed(inputs, inputs_prev)
def embed(self, inputs, inputs_prev):
return self.forward(inputs, inputs_prev, only_embed=True)
def forward(self, inputs, inputs_prev, only_embed=False):
def act(x):
return F.relu(x, inplace=True)
def lrelu(x, negative_slope=0.2):
return F.leaky_relu(x, negative_slope=negative_slope, inplace=True)
def up(x, f=2):
m = nn.UpsamplingNearest2d(scale_factor=f)
return m(x)
def maxp(x):
return F.max_pool2d(x, 2)
B = inputs.size(0)
pos_x = np.tile(np.linspace(0, 1, 40).astype(np.float32).reshape(1, 1, 40), (B, 1, 23, 1))
pos_x = np.concatenate([pos_x, np.fliplr(pos_x)], axis=1)
pos_y = np.tile(np.linspace(0, 1, 23).astype(np.float32).reshape(1, 23, 1), (B, 1, 1, 40))
pos_y = np.concatenate([pos_y, np.flipud(pos_y)], axis=1)
"""
print(pos_x_curr[0, 0, 0, 0])
print(pos_x_curr[0, 0, 0, int(MODEL_WIDTH*(1/4))-1])
print(pos_x_curr[0, 0, 0, int(MODEL_WIDTH*(2/4))-1])
print(pos_x_curr[0, 0, 0, int(MODEL_WIDTH*(3/4))-1])
print(pos_x_curr[0, 0, 0, int(MODEL_WIDTH*(4/4))-1])
from scipy import misc
misc.imshow(
np.vstack([
np.squeeze(pos_x_curr[0].transpose((1, 2, 0))) * 255,
np.squeeze(pos_y_curr[0].transpose((1, 2, 0))) * 255
])
)
"""
pos_x = to_cuda(to_variable(pos_x, volatile=inputs.volatile, requires_grad=inputs.requires_grad), Config.GPU)
pos_y = to_cuda(to_variable(pos_y, volatile=inputs.volatile, requires_grad=inputs.requires_grad), Config.GPU)
x_emb0_curr = inputs # 3x90x160
x_emb1_curr = lrelu(self.emb_c1_sd_curr(self.emb_c1_bn_curr(self.emb_c1_curr(x_emb0_curr)))) # 45x80
x_emb2_curr = lrelu(self.emb_c2_sd_curr(self.emb_c2_bn_curr(self.emb_c2_curr(x_emb1_curr)))) # 45x80
x_emb2_curr = F.pad(x_emb2_curr, (0, 0, 1, 0)) # 45x80 -> 46x80
x_emb2_curr = maxp(x_emb2_curr) # 23x40
x_emb3_curr = lrelu(self.emb_c3_sd_curr(self.emb_c3_bn_curr(self.emb_c3_curr(x_emb2_curr)))) # 23x40
x_emb0_prev = inputs_prev # 2x45x80
x_emb1_prev = lrelu(self.emb_c1_sd_prev(self.emb_c1_bn_prev(self.emb_c1_prev(x_emb0_prev)))) # 45x80
x_emb1_prev = F.pad(x_emb1_prev, (0, 0, 1, 0)) # 45x80 -> 46x80
x_emb1_prev = maxp(x_emb1_prev) # 23x40
x_emb2_prev = lrelu(self.emb_c2_sd_prev(self.emb_c2_bn_prev(self.emb_c2_prev(x_emb1_prev)))) # 23x40
x_emb3 = torch.cat([x_emb3_curr, x_emb2_prev, pos_x, pos_y], 1)
x_emb3 = F.pad(x_emb3, (0, 0, 1, 0)) # 23x40 -> 24x40
x_emb4 = lrelu(self.emb_c4_sd(self.emb_c4_bn(self.emb_c4(x_emb3)))) # 12x20
x_emb5 = lrelu(self.emb_c5_sd(self.emb_c5_bn(self.emb_c5(x_emb4)))) # 6x10
x_emb6 = lrelu(self.emb_c6_sd(self.emb_c6_bn(self.emb_c6(x_emb5)))) # 3x5
x_emb7 = lrelu(self.emb_c7_sd(self.emb_c7_bn(self.emb_c7(x_emb6)))) # 3x5
x_emb = x_emb7
if only_embed:
return x_emb
else:
x_maps = x_emb # 3x5
x_maps = up(x_maps, 4) # 12x20
x_maps = lrelu(self.maps_c1_bn(self.maps_c1(x_maps))) # 12x20
x_maps = up(x_maps, 4) # 48x80
x_maps = lrelu(self.maps_c2_bn(self.maps_c2(x_maps))) # 48x80 -> 44x80
x_maps = F.pad(x_maps, (0, 0, 1, 0)) # 45x80
x_maps = up(x_maps) # 90x160
x_maps = F.sigmoid(self.maps_c3(x_maps)) # 90x160
ae_size = 3 + self.nb_previous_images
x_grids = x_maps[:, 0:8, ...]
x_ae = x_maps[:, 8:8+ae_size, ...]
x_flow = x_maps[:, 8+ae_size:8+ae_size+1, ...]
x_canny = x_maps[:, 8+ae_size+1:8+ae_size+2, ...]
x_vec = x_emb
x_vec = x_vec.view(-1, 512*3*5)
x_vec = F.dropout(x_vec, p=0.5, training=self.training)
x_vec = F.sigmoid(self.vec_fc1(x_vec))
atts_size = 10 + 7 + 3 + 5 + 8 + 4 + 4 + 4 + 3
ma_size = 9 + 9 + 9 + 9
x_atts = x_vec[:, 0:atts_size]
x_ma = x_vec[:, atts_size:atts_size+ma_size]
x_flipped = x_vec[:, atts_size+ma_size:]
return x_ae, x_grids, x_atts, x_ma, x_flow, x_canny, x_flipped, x_emb
def predict_grids(self, inputs, inputs_prev):
x_ae, x_grids, x_atts, x_ma, x_flow, x_canny, x_flipped, x_emb = self.forward(inputs, inputs_prev)
return x_grids
class PredictorWithShortcuts(nn.Module):
def __init__(self):
super(PredictorWithShortcuts, self).__init__()
def identity(_):
return lambda x: x
#bn2d = nn.BatchNorm2d
#bn1d = nn.BatchNorm1d
bn2d = nn.InstanceNorm2d
bn1d = nn.InstanceNorm1d
#bn2d = identity
#bn1d = identity
#bn2d = InstanceNormalization
self.nb_previous_images = 2
self.emb_c1_curr = nn.Conv2d(3, 128, kernel_size=7, padding=3, stride=2)
self.emb_c1_bn_curr = bn2d(128)
self.emb_c1_sd_curr = nn.Dropout2d(0.0)
self.emb_c2_curr = nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1)
self.emb_c2_bn_curr = bn2d(128)
self.emb_c2_sd_curr = nn.Dropout2d(0.0)
self.emb_c3_curr = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1)
self.emb_c3_bn_curr = bn2d(256)
self.emb_c3_sd_curr = nn.Dropout2d(0.0)
self.emb_c1_prev = nn.Conv2d(self.nb_previous_images, 64, kernel_size=3, padding=1, stride=1)
self.emb_c1_bn_prev = bn2d(64)
self.emb_c1_sd_prev = nn.Dropout2d(0.0)
self.emb_c2_prev = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1)
self.emb_c2_bn_prev = bn2d(128)
self.emb_c2_sd_prev = nn.Dropout2d(0.0)
self.emb_c4 = nn.Conv2d(256+128+4, 256, kernel_size=5, padding=2, stride=2)
self.emb_c4_bn = bn2d(256)
self.emb_c4_sd = nn.Dropout2d(0.0)
self.emb_c5 = nn.Conv2d(256, 256, kernel_size=5, padding=2, stride=2)
self.emb_c5_bn = bn2d(256)
self.emb_c5_sd = nn.Dropout2d(0.0)
self.emb_c6 = nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2)
self.emb_c6_bn = bn2d(512)
self.emb_c6_sd = nn.Dropout2d(0.0)
self.emb_c7 = nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1)
self.emb_c7_bn = bn2d(512)
self.emb_c7_sd = nn.Dropout2d(0.0)
self.maps_c1 = nn.Conv2d(512+256, 256, kernel_size=5, padding=2)
self.maps_c1_bn = bn2d(256)
self.maps_c2 = nn.Conv2d(256+128, 256, kernel_size=5, padding=(0, 2))
self.maps_c2_bn = bn2d(256)
self.maps_c3 = nn.Conv2d(256+3, 8+3+self.nb_previous_images+1+1, kernel_size=5, padding=2) # 8 grids, 3 for RGB AE, N prev for N grayscale AE, 1 flow, 1 canny
# road_type: 10
# intersection: 7
# direction: 3
# lane count: 5
# curve: 8
# space-front: 4
# space-left: 4
# space-right: 4
# offroad: 3
atts_size = 10 + 7 + 3 + 5 + 8 + 4 + 4 + 4 + 3
ma_size = 9 + 9 + 9 + 9
flipped_size = self.nb_previous_images
self.vec_fc1 = nn.Linear(512*3*5, atts_size+ma_size+flipped_size, bias=False)
for m in self.modules():
classname = m.__class__.__name__
if classname.find('Conv') != -1:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif classname.find('Linear') != -1:
m.weight.data.normal_(0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
#m.weight.data.normal_(1.0, 0.02)
#m.bias.data.fill_(0)
def downscale(self, img):
return ia.imresize_single_image(img, (train.MODEL_HEIGHT, train.MODEL_WIDTH), interpolation="cubic")
def downscale_prev(self, img):
return ia.imresize_single_image(img, (train.MODEL_PREV_HEIGHT, train.MODEL_PREV_WIDTH), interpolation="cubic")
def embed_state(self, previous_states, state, volatile=False, requires_grad=True, gpu=-1):
prev_scrs = [self.downscale_prev(s.screenshot_rs) for s in previous_states]
prev_scrs_y = [cv2.cvtColor(scr, cv2.COLOR_RGB2GRAY) for scr in prev_scrs]
#inputs = np.dstack([self.downscale(state.screenshot_rs)] + list(reversed(prev_scrs_y)))
inputs = np.array(self.downscale(state.screenshot_rs), dtype=np.float32)
inputs = inputs / 255.0
inputs = inputs.transpose((2, 0, 1))
inputs = inputs[np.newaxis, ...]
inputs = to_cuda(to_variable(inputs, volatile=volatile, requires_grad=requires_grad), gpu)
inputs_prev = np.dstack(prev_scrs_y)
inputs_prev = inputs_prev.astype(np.float32) / 255.0
inputs_prev = inputs_prev.transpose((2, 0, 1))
inputs_prev = inputs_prev[np.newaxis, ...]
inputs_prev = to_cuda(to_variable(inputs_prev, volatile=volatile, requires_grad=requires_grad), gpu)
return self.embed(inputs, inputs_prev)
def embed(self, inputs, inputs_prev):
return self.forward(inputs, inputs_prev, only_embed=True)
def forward(self, inputs, inputs_prev, only_embed=False):
def act(x):
return F.relu(x, inplace=True)
def lrelu(x, negative_slope=0.2):
return F.leaky_relu(x, negative_slope=negative_slope, inplace=True)
def up(x, f=2):
m = nn.UpsamplingNearest2d(scale_factor=f)
return m(x)
def maxp(x):
return F.max_pool2d(x, 2)
B = inputs.size(0)
pos_x = np.tile(np.linspace(0, 1, 40).astype(np.float32).reshape(1, 1, 40), (B, 1, 23, 1))
pos_x = np.concatenate([pos_x, np.fliplr(pos_x)], axis=1)
pos_y = np.tile(np.linspace(0, 1, 23).astype(np.float32).reshape(1, 23, 1), (B, 1, 1, 40))
pos_y = np.concatenate([pos_y, np.flipud(pos_y)], axis=1)
pos_x = to_cuda(to_variable(pos_x, volatile=inputs.volatile, requires_grad=inputs.requires_grad), Config.GPU)
pos_y = to_cuda(to_variable(pos_y, volatile=inputs.volatile, requires_grad=inputs.requires_grad), Config.GPU)
x_emb0_curr = inputs # 3x90x160
x_emb1_curr = lrelu(self.emb_c1_sd_curr(self.emb_c1_bn_curr(self.emb_c1_curr(x_emb0_curr)))) # 45x80
x_emb2_curr = lrelu(self.emb_c2_sd_curr(self.emb_c2_bn_curr(self.emb_c2_curr(x_emb1_curr)))) # 45x80
x_emb2_curr = F.pad(x_emb2_curr, (0, 0, 1, 0)) # 45x80 -> 46x80
x_emb2_curr_pool = maxp(x_emb2_curr) # 23x40
x_emb3_curr = lrelu(self.emb_c3_sd_curr(self.emb_c3_bn_curr(self.emb_c3_curr(x_emb2_curr_pool)))) # 23x40
x_emb0_prev = inputs_prev # 2x45x80
x_emb1_prev = lrelu(self.emb_c1_sd_prev(self.emb_c1_bn_prev(self.emb_c1_prev(x_emb0_prev)))) # 45x80
x_emb1_prev = F.pad(x_emb1_prev, (0, 0, 1, 0)) # 45x80 -> 46x80
x_emb1_prev = maxp(x_emb1_prev) # 23x40
x_emb2_prev = lrelu(self.emb_c2_sd_prev(self.emb_c2_bn_prev(self.emb_c2_prev(x_emb1_prev)))) # 23x40
x_emb3 = torch.cat([x_emb3_curr, x_emb2_prev, pos_x, pos_y], 1)
x_emb3 = F.pad(x_emb3, (0, 0, 1, 0)) # 23x40 -> 24x40
x_emb4 = lrelu(self.emb_c4_sd(self.emb_c4_bn(self.emb_c4(x_emb3)))) # 12x20
x_emb5 = lrelu(self.emb_c5_sd(self.emb_c5_bn(self.emb_c5(x_emb4)))) # 6x10
x_emb6 = lrelu(self.emb_c6_sd(self.emb_c6_bn(self.emb_c6(x_emb5)))) # 3x5
x_emb7 = lrelu(self.emb_c7_sd(self.emb_c7_bn(self.emb_c7(x_emb6)))) # 3x5
x_emb = x_emb7
if only_embed:
return x_emb
else:
x_maps = x_emb # 3x5
x_maps = up(x_maps, 4) # 12x20
x_maps = lrelu(self.maps_c1_bn(self.maps_c1(
torch.cat([x_maps, x_emb4], 1)
))) # 12x20
x_maps = up(x_maps, 4) # 48x80
x_maps = lrelu(self.maps_c2_bn(self.maps_c2(
torch.cat([x_maps, F.pad(x_emb2_curr, (0, 0, 1, 1))], 1)
))) # 48x80 -> 44x80
x_maps = F.pad(x_maps, (0, 0, 1, 0)) # 45x80
x_maps = up(x_maps) # 90x160
x_maps = F.sigmoid(self.maps_c3(
torch.cat([x_maps, inputs], 1)
)) # 90x160
ae_size = 3 + self.nb_previous_images
x_grids = x_maps[:, 0:8, ...]
x_ae = x_maps[:, 8:8+ae_size, ...]
x_flow = x_maps[:, 8+ae_size:8+ae_size+1, ...]
x_canny = x_maps[:, 8+ae_size+1:8+ae_size+2, ...]
x_vec = x_emb
x_vec = x_vec.view(-1, 512*3*5)
x_vec = F.dropout(x_vec, p=0.5, training=self.training)
x_vec = F.sigmoid(self.vec_fc1(x_vec))
atts_size = 10 + 7 + 3 + 5 + 8 + 4 + 4 + 4 + 3
ma_size = 9 + 9 + 9 + 9
x_atts = x_vec[:, 0:atts_size]
x_ma = x_vec[:, atts_size:atts_size+ma_size]
x_flipped = x_vec[:, atts_size+ma_size:]
return x_ae, x_grids, x_atts, x_ma, x_flow, x_canny, x_flipped, x_emb
def predict_grids(self, inputs, inputs_prev):
x_ae, x_grids, x_atts, x_ma, x_flow, x_canny, x_flipped, x_emb = self.forward(inputs, inputs_prev)
return x_grids
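The spatial sizes annotated throughout forward() (90x160 -> 45x80 -> 23x40 -> 12x20 -> 6x10 -> 3x5) can be sanity-checked independently of the module; for example, the first convolution's claimed halving:

    import torch
    import torch.nn as nn

    # 7x7 conv, stride 2, padding 3: floor((90 + 2*3 - 7)/2) + 1 = 45, and 160 -> 80
    conv = nn.Conv2d(3, 128, kernel_size=7, padding=3, stride=2)
    out = conv(torch.randn(1, 3, 90, 160))
    assert out.shape == (1, 128, 45, 80)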
| 43.049065
| 166
| 0.614383
| 2,981
| 18,425
| 3.51627
| 0.080174
| 0.072124
| 0.020607
| 0.022324
| 0.941423
| 0.940946
| 0.936367
| 0.932551
| 0.929498
| 0.929498
| 0
| 0.087759
| 0.256011
| 18,425
| 427
| 167
| 43.149883
| 0.676904
| 0.06578
| 0
| 0.857143
| 0
| 0
| 0.003616
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079734
| false
| 0
| 0.059801
| 0.046512
| 0.225914
| 0.003322
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3a63a44318c3f788a7357416beed8f13234ef4af
| 14,101
|
py
|
Python
|
qa327_test/frontend/test_sell.py
|
CanonAY/Highedge-Co.
|
aced31e287b3dd4d6fb444825aa5b553166deff9
|
[
"MIT"
] | null | null | null |
qa327_test/frontend/test_sell.py
|
CanonAY/Highedge-Co.
|
aced31e287b3dd4d6fb444825aa5b553166deff9
|
[
"MIT"
] | null | null | null |
qa327_test/frontend/test_sell.py
|
CanonAY/Highedge-Co.
|
aced31e287b3dd4d6fb444825aa5b553166deff9
|
[
"MIT"
] | 2
|
2021-02-23T06:18:42.000Z
|
2021-04-17T07:07:52.000Z
|
import pytest
from seleniumbase import BaseCase
from selenium.webdriver.common.keys import Keys
from qa327_test.conftest import base_url
from unittest.mock import patch
from qa327.models import db, User
from werkzeug.security import generate_password_hash, check_password_hash
"""
This file defines all unit tests for the frontend login page.
"""
# Mock a sample user
test_user = User(
email='test_sell@test.com',
name='test_sell',
password=generate_password_hash('TEST_frontend')
)
class FrontEndSellTest(BaseCase):
    # Log in to the profile so the sell-form functionality can be tested.
    def login_to_profile(self):
        # invalidate any existing session
self.open(base_url + '/logout')
self.open(base_url + '/login')
# enter test user email and password
self.type("#email", "test_sell@test.com")
self.type("#password", "TEST_frontend")
# click enter button
self.click('input[type="submit"]')
    # R4.1 The ticket name has to be alphanumeric-only; spaces are allowed only when they are not the first or last character.
@patch('qa327.backend.get_user', return_value=test_user)
def test_name_format(self, *_):
# login to the profile
self.login_to_profile()
""" NEGATIVE """
# enter invalid name and valid quantity, price and date
self.type("#sell-name", " TESTticket")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT, "31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid name and valid quantity, price and date
self.type("#sell-name", "TESTticket ")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid name and valid quantity, price and date
self.type("#sell-name", "TE_STticket")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
""" POSITIVE """
# enter valid name, quantity, price and date
self.type("#sell-name", "TESTticket")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
        # assert ticket submit succeeded
self.assert_element("#message")
self.assert_text("Ticket successfully posted", "#message")
self.open(base_url)
# R4.2 The name of the ticket is no longer than 60 characters
# R4.8 (optional) The name of the tickets has to contain at least 6 characters
@patch('qa327.backend.get_user', return_value=test_user)
def test_name_length(self, *_):
# login to the profile
self.login_to_profile()
""" NEGATIVE """
# enter invalid name and valid quantity, price and date
self.type("#sell-name", 6*"TESTticket1")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid name and valid quantity, price and date
self.type("#sell-name", "TEST1")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
""" POSITIVE """
# enter valid name, quantity, price and date
self.type("#sell-name", "TESTticket1")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
        # assert ticket submit succeeded
self.assert_element("#message")
self.assert_text("Ticket successfully posted", "#message")
self.open(base_url)
# R4.3 The quantity of the tickets has to be more than 0, and less than or equal to 100.
@patch('qa327.backend.get_user', return_value=test_user)
def test_quantity(self, *_):
# login to the profile
self.login_to_profile()
""" NEGATIVE """
# enter invalid quantity and valid name, price and date
self.type("#sell-name", "TESTticket2")
self.type("#sell-quantity", "0")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid quantity and valid name, price and date
self.type("#sell-name", "TESTticket2")
self.type("#sell-quantity", "102")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
""" POSITIVE """
# enter valid name, quantity, price and date
self.type("#sell-name", "TESTticket2")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
        # assert ticket submit succeeded
self.assert_element("#message")
self.assert_text("Ticket successfully posted", "#message")
self.open(base_url)
# R4.4 Price has to be of range [10, 100]
@patch('qa327.backend.get_user', return_value=test_user)
def test_price(self, *_):
# login to the profile
self.login_to_profile()
""" NEGATIVE """
# enter invalid price and valid name, quantity and date
self.type("#sell-name", "TESTticket3")
self.type("#sell-quantity", "50")
self.type("#sell-price", "0")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid price and valid name, quantity and date
self.type("#sell-name", "TESTticket3")
self.type("#sell-quantity", "50")
self.type("#sell-price", "102")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
""" POSITIVE """
# enter valid name, quantity, price and date
self.type("#sell-name", "TESTticket3")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
        # assert ticket submit succeeded
self.assert_element("#message")
self.assert_text("Ticket successfully posted", "#message")
self.open(base_url)
# R4.5 Date must be given in the format YYYYMMDD (e.g. 20200901)
@patch('qa327.backend.get_user', return_value=test_user)
def test_date(self, *_):
# login to the profile
self.login_to_profile()
""" NEGATIVE """
# enter invalid date and valid name, price and quantity
self.type("#sell-name", "TESTticket4")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("201202", "12", "31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
""" POSITIVE """
# enter valid name, quantity, price and date
self.type("#sell-name", "TESTticket4")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
        # assert ticket submit succeeded
self.assert_element("#message")
self.assert_text("Ticket successfully posted", "#message")
self.open(base_url)
# R4.6 For any errors, redirect back to / and show an error message
@patch('qa327.backend.get_user', return_value=test_user)
def test_redirect(self, *_):
# login to the profile
self.login_to_profile()
# enter invalid name and valid date, price and quantity
self.type("#sell-name", "TEST_ticket5")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_title("Profile")
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid quantity and valid date, price and name
self.type("#sell-name", "TEST_ticket5")
self.type("#sell-quantity", "0")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_title("Profile")
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid price and valid name, quantity and date
self.type("#sell-name", "TESTticket5")
self.type("#sell-quantity", "50")
self.type("#sell-price", "0")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_title("Profile")
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# enter invalid date and valid name, price and quantity
self.type("#sell-name", "TESTticket5")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("201202", "12", "31")
self.click('input[value="Submit Selling Ticket"]')
# assert ticket submit failed
self.assert_title("Profile")
self.assert_element("#message_s")
self.assert_text("Ticket format invalid", "#message_s")
self.open(base_url)
# R4.7 The added new ticket information will be posted on the user profile page
@patch('qa327.backend.get_user', return_value=test_user)
def test_post_ticket(self, *_):
# login to the profile
self.login_to_profile()
# enter valid name, quantity, price and date
self.type("#sell-name", "TESTticket6")
self.type("#sell-quantity", "50")
self.type("#sell-price", "50")
date = self.find_element("#sell-date")
date.send_keys("2020", Keys.ARROW_RIGHT, "12", Keys.ARROW_RIGHT,"31")
self.click('input[value="Submit Selling Ticket"]')
        # assert ticket submit succeeded
self.assert_element("#message")
self.assert_text("Ticket successfully posted", "#message")
self.open(base_url)
# assert the ticket posted on profile page
self.assert_text("TESTticket6")
| 39.946176
| 131
| 0.623431
| 1,814
| 14,101
| 4.712789
| 0.094818
| 0.058018
| 0.08422
| 0.038601
| 0.864546
| 0.847351
| 0.847351
| 0.845479
| 0.845479
| 0.845479
| 0
| 0.029896
| 0.233813
| 14,101
| 352
| 132
| 40.059659
| 0.761385
| 0.180342
| 0
| 0.848889
| 0
| 0
| 0.285347
| 0.013668
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.035556
| false
| 0.013333
| 0.031111
| 0
| 0.071111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3ab9cd8cbe494c909c3d2d08361fabcb98fac9d0
| 24,557
|
py
|
Python
|
tests/test_validate_network_bgp.py
|
Cray-HPE/canu
|
3a92ce1e9b63f35aa30b9135afaa734e61909407
|
[
"MIT"
] | 6
|
2021-09-16T22:02:48.000Z
|
2022-02-04T18:08:57.000Z
|
tests/test_validate_network_bgp.py
|
Cray-HPE/canu
|
3a92ce1e9b63f35aa30b9135afaa734e61909407
|
[
"MIT"
] | 57
|
2021-09-17T17:15:59.000Z
|
2022-03-31T20:56:21.000Z
|
tests/test_validate_network_bgp.py
|
Cray-HPE/canu
|
3a92ce1e9b63f35aa30b9135afaa734e61909407
|
[
"MIT"
] | 4
|
2022-01-06T17:09:02.000Z
|
2022-02-04T18:09:33.000Z
|
# MIT License
#
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Test CANU validate network bgp commands."""
from unittest.mock import patch
from click import testing
import requests
import responses
from canu.cli import cli
username = "admin"
password = "admin"
ip = "192.168.1.1"
asn = 65533
sls_cache = {
"HMN_IPs": {
"sw-spine-001": "192.168.1.1",
"sw-spine-002": "192.168.1.2",
},
"SWITCH_ASN": asn,
}
cache_minutes = 0
sls_address = "api-gw-service-nmn.local"
runner = testing.CliRunner()
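# The tests below all follow one pattern: @patch stubs out SLS lookup and
# vendor detection, `responses` intercepts every Aruba REST call (login, the
# per-VRF bgp_neighbors queries, the hostname/platform query, logout), and
# CliRunner drives the CLI exactly as a user would, so no real switch is
# needed.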
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_aruba(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command runs and returns PASS."""
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
for name, ip in sls_cache["HMN_IPs"].items():
responses.add(
responses.POST,
f"https://{ip}/rest/v10.04/login",
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system/vrfs/default/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=all_established,
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system/vrfs/Customer/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=all_established,
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system?attributes=platform_name,hostname",
json={"hostname": name, "platform_name": "X86-64"},
)
responses.add(
responses.POST,
f"https://{ip}/rest/v10.04/logout",
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "PASS - IP: 192.168.1.1 Hostname: sw-spine-001" in str(result.output)
assert "PASS - IP: 192.168.1.2 Hostname: sw-spine-002" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_verbose(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command runs and returns PASS."""
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
for name, ip_address in sls_cache["HMN_IPs"].items():
responses.add(
responses.POST,
f"https://{ip_address}/rest/v10.04/login",
)
responses.add(
responses.GET,
f"https://{ip_address}/rest/v10.04/system/vrfs/default/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=all_established,
)
responses.add(
responses.GET,
f"https://{ip_address}/rest/v10.04/system/vrfs/Customer/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=all_established_cmn,
)
responses.add(
responses.GET,
f"https://{ip_address}/rest/v10.04/system?attributes=platform_name,hostname",
json={"hostname": name, "platform_name": "X86-64"},
)
responses.add(
responses.POST,
f"https://{ip_address}/rest/v10.04/logout",
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
"--verbose",
],
)
assert result.exit_code == 0
assert "Switch: sw-spine-001 (192.168.1.1) " in str(result.output)
assert "sw-spine-001 ===> 192.168.1.2: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.1.3: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.1.4: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.10.2: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.10.3: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.10.4: Established" in str(result.output)
assert "Switch: sw-spine-002 (192.168.1.2) " in str(result.output)
assert "sw-spine-002 ===> 192.168.1.2: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.1.3: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.1.4: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.10.2: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.10.3: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.10.4: Established" in str(result.output)
assert "PASS - IP: 192.168.1.1 Hostname: sw-spine-001" in str(result.output)
assert "PASS - IP: 192.168.1.2 Hostname: sw-spine-002" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_nmn(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command runs and returns PASS."""
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
for name, ip_address in sls_cache["HMN_IPs"].items():
responses.add(
responses.POST,
f"https://{ip_address}/rest/v10.04/login",
)
responses.add(
responses.GET,
f"https://{ip_address}/rest/v10.04/system/vrfs/default/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=all_established,
)
responses.add(
responses.GET,
f"https://{ip_address}/rest/v10.04/system?attributes=platform_name,hostname",
json={"hostname": name, "platform_name": "X86-64"},
)
responses.add(
responses.POST,
f"https://{ip_address}/rest/v10.04/logout",
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
"--verbose",
"--network",
"nmn",
],
)
assert result.exit_code == 0
assert "Switch: sw-spine-001 (192.168.1.1) " in str(result.output)
assert "sw-spine-001 ===> 192.168.1.2: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.1.3: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.1.4: Established" in str(result.output)
assert "Switch: sw-spine-002 (192.168.1.2) " in str(result.output)
assert "sw-spine-002 ===> 192.168.1.2: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.1.3: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.1.4: Established" in str(result.output)
assert "PASS - IP: 192.168.1.1 Hostname: sw-spine-001" in str(result.output)
assert "PASS - IP: 192.168.1.2 Hostname: sw-spine-002" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_cmn(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command runs and returns PASS."""
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
for name, ip_address in sls_cache["HMN_IPs"].items():
responses.add(
responses.POST,
f"https://{ip_address}/rest/v10.04/login",
)
responses.add(
responses.GET,
f"https://{ip_address}/rest/v10.04/system/vrfs/Customer/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=all_established_cmn,
)
responses.add(
responses.GET,
f"https://{ip_address}/rest/v10.04/system?attributes=platform_name,hostname",
json={"hostname": name, "platform_name": "X86-64"},
)
responses.add(
responses.POST,
f"https://{ip_address}/rest/v10.04/logout",
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
"--verbose",
"--network",
"cmn",
],
)
assert result.exit_code == 0
assert "Switch: sw-spine-001 (192.168.1.1) " in str(result.output)
assert "sw-spine-001 ===> 192.168.10.2: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.10.3: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.10.4: Established" in str(result.output)
assert "Switch: sw-spine-002 (192.168.1.2) " in str(result.output)
assert "sw-spine-002 ===> 192.168.10.2: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.10.3: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.10.4: Established" in str(result.output)
assert "PASS - IP: 192.168.1.1 Hostname: sw-spine-001" in str(result.output)
assert "PASS - IP: 192.168.1.2 Hostname: sw-spine-002" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_bad_password(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command errors on bad credentials."""
bad_password = "foo"
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
responses.add(
responses.POST,
f"https://{ip}/rest/v10.04/login",
body=requests.exceptions.HTTPError("Client Error: Unauthorized for url"),
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
bad_password,
],
)
assert result.exit_code == 0
assert (
"Error connecting to switch 192.168.1.1, check the username or password"
in str(result.output)
)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_fail(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command runs and returns PASS."""
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
for name, ip in sls_cache["HMN_IPs"].items():
responses.add(
responses.POST,
f"https://{ip}/rest/v10.04/login",
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system/vrfs/default/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=one_idle,
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system/vrfs/Customer/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=one_idle,
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system?attributes=platform_name,hostname",
json={"hostname": name, "platform_name": "X86-64"},
)
responses.add(
responses.POST,
f"https://{ip}/rest/v10.04/logout",
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "FAIL - IP: 192.168.1.1 Hostname: sw-spine-001" in str(result.output)
assert "FAIL - IP: 192.168.1.2 Hostname: sw-spine-002" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_fail_verbose(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command runs and returns PASS."""
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
for name, ip in sls_cache["HMN_IPs"].items():
responses.add(
responses.POST,
f"https://{ip}/rest/v10.04/login",
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system/vrfs/default/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=one_idle,
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system/vrfs/Customer/bgp_routers/{asn}/bgp_neighbors?depth=2",
json=one_idle,
)
responses.add(
responses.GET,
f"https://{ip}/rest/v10.04/system?attributes=platform_name,hostname",
json={"hostname": name, "platform_name": "X86-64"},
)
responses.add(
responses.POST,
f"https://{ip}/rest/v10.04/logout",
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
"--verbose",
],
)
assert result.exit_code == 0
assert "Switch: sw-spine-001 (192.168.1.1) " in str(result.output)
assert "sw-spine-001 ===> 192.168.1.2: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.1.3: Established" in str(result.output)
assert "sw-spine-001 ===> 192.168.1.4: Idle" in str(result.output)
assert "Switch: sw-spine-002 (192.168.1.2) " in str(result.output)
assert "sw-spine-002 ===> 192.168.1.2: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.1.3: Established" in str(result.output)
assert "sw-spine-002 ===> 192.168.1.4: Idle" in str(result.output)
assert "FAIL - IP: 192.168.1.1 Hostname: sw-spine-001" in str(result.output)
assert "FAIL - IP: 192.168.1.2 Hostname: sw-spine-002" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_vendor_error(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command errors on 'None' vendor."""
with runner.isolated_filesystem():
switch_vendor.return_value = None
pull_sls_networks.return_value = sls_cache
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "192.168.1.1 - Connection Error" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@patch("canu.validate.network.bgp.bgp.get_bgp_neighbors_aruba")
@responses.activate
def test_validate_bgp_exception(
get_bgp_neighbors_aruba,
pull_sls_networks,
switch_vendor,
):
"""Test that the `canu validate network bgp` command errors on exception."""
with runner.isolated_filesystem():
switch_vendor.return_value = "aruba"
pull_sls_networks.return_value = sls_cache
get_bgp_neighbors_aruba.side_effect = requests.exceptions.HTTPError
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "192.168.1.1 - Connection Error" in str(result.output)
# Mellanox
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_mellanox(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command runs with Mellanox switch."""
with runner.isolated_filesystem():
switch_vendor.return_value = "mellanox"
pull_sls_networks.return_value = sls_cache
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
json={"status": "OK", "status_msg": "Successfully logged-in"},
)
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
json=bgp_status_mellanox,
)
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
json={"data": [{"Hostname": "sw-spine-mellanox"}]},
)
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
json={"data": {"value": ["MSN2100"]}},
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "PASS - IP: 192.168.1.1 Hostname: sw-spine-mellanox" in str(
result.output,
)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_mellanox_connection_error(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command errors with Mellanox switch connection error."""
with runner.isolated_filesystem():
switch_vendor.return_value = "mellanox"
pull_sls_networks.return_value = sls_cache
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
status=404,
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "192.168.1.1 - Connection Error" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_mellanox_bad_login(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command errors with Mellanox switch bad login."""
with runner.isolated_filesystem():
switch_vendor.return_value = "mellanox"
pull_sls_networks.return_value = sls_cache
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
json={"status": "ERROR", "status_msg": "Invalid username or password"},
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "192.168.1.1 - Connection Error" in str(result.output)
@patch("canu.validate.network.bgp.bgp.switch_vendor")
@patch("canu.validate.network.bgp.bgp.pull_sls_networks")
@responses.activate
def test_validate_bgp_mellanox_exception(pull_sls_networks, switch_vendor):
"""Test that the `canu validate network bgp` command errors with Mellanox switch exception."""
with runner.isolated_filesystem():
switch_vendor.return_value = "mellanox"
pull_sls_networks.return_value = sls_cache
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
json={"status": "OK", "status_msg": "Successfully logged-in"},
)
responses.add(
responses.POST,
f"https://{ip}/admin/launch?script=rh&template=json-request&action=json-login",
body=requests.exceptions.HTTPError(),
)
result = runner.invoke(
cli,
[
"validate",
"network",
"bgp",
"--username",
username,
"--password",
password,
],
)
assert result.exit_code == 0
assert "192.168.1.1 - Connection Error" in str(result.output)
all_established = {
"192.168.1.2": {
"status": {"bgp_peer_state": "Established"},
},
"192.168.1.3": {
"status": {"bgp_peer_state": "Established"},
},
"192.168.1.4": {
"status": {"bgp_peer_state": "Established"},
},
}
all_established_cmn = {
"192.168.10.2": {
"status": {"bgp_peer_state": "Established"},
},
"192.168.10.3": {
"status": {"bgp_peer_state": "Established"},
},
"192.168.10.4": {
"status": {"bgp_peer_state": "Established"},
},
}
one_idle = {
"192.168.1.2": {
"status": {"bgp_peer_state": "Established"},
},
"192.168.1.3": {
"status": {"bgp_peer_state": "Established"},
},
"192.168.1.4": {
"status": {"bgp_peer_state": "Idle"},
},
}
dell_firmware_mock = {
"dell-system-software:sw-version": {
"sw-version": "10.5.1.4",
"sw-platform": "S4048T-ON",
},
}
dell_hostname_mock = {"dell-system:hostname": "test-dell"}
bgp_status_mellanox = {
"status": "OK",
"executed_command": "show ip bgp summary",
"status_message": "",
"data": [
{
"VRF name": "default",
},
{
"192.168.1.9": [
{
"State/PfxRcd": "ESTABLISHED/13",
},
],
},
],
}
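# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the mocking pattern the tests above
# rely on: `responses` intercepts the HTTP calls while Click's CliRunner
# drives the command. The `hello` command and its URL are hypothetical
# stand-ins, not part of CANU; they only illustrate the pattern.
# ---------------------------------------------------------------------------
import click


@click.command()
def hello():
    """Fetch a greeting from a (mocked) API and echo it."""
    message = requests.get("https://example.test/greeting").json()["message"]
    click.echo(message)


@responses.activate
def test_hello_pattern():
    # Register the canned response before invoking the CLI.
    responses.add(
        responses.GET,
        "https://example.test/greeting",
        json={"message": "PASS"},
    )
    result = testing.CliRunner().invoke(hello)
    assert result.exit_code == 0
    assert "PASS" in result.output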
| 36.166421
| 113
| 0.577269
| 2,907
| 24,557
| 4.755074
| 0.085311
| 0.030384
| 0.045359
| 0.070101
| 0.867467
| 0.864212
| 0.856543
| 0.848947
| 0.83976
| 0.838458
| 0
| 0.052767
| 0.288472
| 24,557
| 678
| 114
| 36.219764
| 0.738339
| 0.087877
| 0
| 0.724662
| 0
| 0.030405
| 0.345183
| 0.057291
| 0
| 0
| 0
| 0
| 0.118243
| 1
| 0.021959
| false
| 0.067568
| 0.008446
| 0
| 0.030405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
3ac2862bd592576f2ad893877b0fe9b5151232be
| 41
|
py
|
Python
|
validate_passcode.py
|
kimsappi/laundry_reservation_flask
|
5668df956d899934db4ad4ec26fb7b4e6e5770f4
|
[
"Unlicense"
] | null | null | null |
validate_passcode.py
|
kimsappi/laundry_reservation_flask
|
5668df956d899934db4ad4ec26fb7b4e6e5770f4
|
[
"Unlicense"
] | null | null | null |
validate_passcode.py
|
kimsappi/laundry_reservation_flask
|
5668df956d899934db4ad4ec26fb7b4e6e5770f4
|
[
"Unlicense"
] | null | null | null |
def validate_passcode(data):
return True
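# ---------------------------------------------------------------------------
# The function above is a stub that accepts every passcode. Below is a
# hedged sketch of what a stricter validator could look like; the 4-6 digit
# policy is an assumption for illustration, not the project's actual rule.
# ---------------------------------------------------------------------------
def validate_passcode_strict(data):
    """Return True only for a 4-6 digit numeric passcode (assumed policy)."""
    passcode = str(data).strip()
    return passcode.isdigit() and 4 <= len(passcode) <= 6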
| 20.5
| 28
| 0.829268
| 6
| 41
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 2
| 29
| 20.5
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
3acea938f6ce580c6754da7a0d20e9034741125d
| 21,223
|
py
|
Python
|
pyban/tickets/tests.py
|
abderrahmen-hadjadj-aoul/pyban
|
82fe3f0bcf36880b710bbf617f2a7e6b1097f80c
|
[
"MIT"
] | null | null | null |
pyban/tickets/tests.py
|
abderrahmen-hadjadj-aoul/pyban
|
82fe3f0bcf36880b710bbf617f2a7e6b1097f80c
|
[
"MIT"
] | null | null | null |
pyban/tickets/tests.py
|
abderrahmen-hadjadj-aoul/pyban
|
82fe3f0bcf36880b710bbf617f2a7e6b1097f80c
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.urls import reverse
import json
class GeneralTests(TestCase):
def setUp(self):
self.client = Client()
# HELPERS
def create_user(self, username, password):
payload = {
"username": username,
"password": password,
}
response = self.client.post(reverse("tickets:users"),
json.dumps(payload),
content_type="application/json")
user = json.loads(response.content)
return (response, user)
def get_user(self, id):
response = self.client.get(
reverse("tickets:user", kwargs={"user_id": id}))
user = json.loads(response.content)
return (response, user)
def get_users(self):
response = self.client.get(reverse("tickets:users"))
data = json.loads(response.content)
return (response, data)
def get_token(self, username, password):
payload = {
"username": username,
"password": password,
}
response = self.client.post(reverse("tickets:api_token_auth"),
json.dumps(payload),
follow=True,
content_type="application/json")
data = json.loads(response.content)
return (response, data)
# USERS
def test_create_user(self):
"""
A user should be created
"""
username = "Tom"
password = "123"
(response, user) = self.create_user(username, password)
self.assertEqual(response.status_code, 201)
self.assertEqual(user['username'], username)
def test_create_user_without_username(self):
"""
A user created without username should return an error
"""
username = ""
password = "123"
(response, data) = self.create_user(username, password)
self.assertEqual(response.status_code, 400)
self.assertEqual(data['message'], "Username must be set")
def test_create_user_twice(self):
"""
A user created twice should return an error
"""
username = "Tom"
password = "123"
(response, user) = self.create_user(username, password)
self.assertEqual(response.status_code, 201)
self.assertEqual(user['username'], username)
(response, data) = self.create_user(username, password)
self.assertEqual(data['message'], "User already exists")
def test_create_then_get_user(self):
"""
A user should be created then get
"""
username = "Tom2"
password = "1232"
(response, created_user) = self.create_user(username, password)
id = created_user['id']
(response, user) = self.get_user(id)
self.assertEqual(response.status_code, 200)
self.assertEqual(user['username'], username)
def test_get_user_not_exist(self):
"""
An error should be returned
"""
id = 123456789
(response, user) = self.get_user(id)
self.assertEqual(response.status_code, 404)
def test_get_users(self):
"""
Should get users
"""
username = "Tom3"
password = "1233"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_users()
self.assertEqual(response.status_code, 200)
self.assertEqual(len(data['users']), 1)
self.assertEqual(data['users'][0]['username'], username)
def test_get_token(self):
"""
Should log properly with right credentials
"""
username = "Tom3"
password = "1233"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password)
self.assertEqual(response.status_code, 200)
self.assertIn("token", data)
def test_login_wrong_username(self):
"""
Should return an error for wrong credentials
"""
username = "Tom3"
password = "1233"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username + "0", password)
self.assertEqual(response.status_code, 400)
def test_login_wrong_password(self):
"""
Should return an error for wrong credentials
"""
username = "Tom3"
password = "1233"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password + "0")
self.assertEqual(response.status_code, 400)
# BOARD
def create_board(self, name, headers={}):
payload = {
"name": name,
"columns": [],
}
response = self.client.post(reverse("tickets:boards"),
json.dumps(payload),
content_type="application/json",
**headers)
board = json.loads(response.content)
boardid = board["id"]
payload = {
"title": "TODO",
}
response = self.client.post(reverse("tickets:board_columns",
kwargs={"pk": boardid}),
json.dumps(payload),
content_type="application/json",
**headers)
column = json.loads(response.content)
board["columns"].append(column["id"])
return (response, board, column)
def get_board(self, board_id, headers={}):
response = self.client.get(
reverse("tickets:board", kwargs={"pk": board_id}), **headers)
board = {}
try:
board = json.loads(response.content)
except Exception:
pass
return (response, board)
def test_board_create(self):
"""
Should create a board
"""
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
name = "Board Name"
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
(response, created_board,
created_column) = self.create_board(name, headers=headers)
self.assertEqual(response.status_code, 201)
self.assertEqual(created_board["name"], name)
def test_board_create_then_get_it(self):
"""
Should get the created board
"""
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
# Get token
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
# Create board
name = "Board Name"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
self.assertEqual(response.status_code, 201)
boardid = created_board["id"]
# Get board
(response, board) = self.get_board(boardid, headers=headers)
self.assertEqual(board["id"], boardid)
self.assertEqual(board["name"], name)
def test_board_get_not_exist(self):
"""
Should return error for board that does not exist
"""
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
boardid = 12346789
(response, board) = self.get_board(boardid, headers=headers)
self.assertEqual(response.status_code, 404)
def test_board_permission(self):
"""
Should return error for no permission of board
"""
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
username2 = "Tom--"
password2 = "123--"
(response, created_user2) = self.create_user(username2, password2)
# Get token
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
(response2, data2) = self.get_token(username2, password2)
token2 = data2["token"]
headers2 = {"HTTP_AUTHORIZATION": "Token " + token2}
# Create board
name = "Board Name"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
self.assertEqual(response.status_code, 201)
boardid = created_board["id"]
# Get board
(response, board) = self.get_board(boardid, headers=headers2)
self.assertEqual(response.status_code, 401)
def test_get_board_list(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
# Get token
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
# Create boards
created_boards = []
number_of_boards = 5
for n in range(number_of_boards):
name = "Board Name" + str(n)
(response, created_board,
created_column) = self.create_board(name, headers=headers)
created_boards.append((response, created_board))
response = self.client.get(reverse("tickets:boards"), **headers)
boards = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(boards), 5)
for n in range(number_of_boards):
self.assertIn(created_boards[n][1], boards)
def test_get_board_list_without_other_users_boards(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
username2 = "Tom--"
password2 = "123--"
(response2, created_user2) = self.create_user(username2, password2)
# Get token
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
(response2, data2) = self.get_token(username2, password2)
token2 = data2["token"]
headers2 = {"HTTP_AUTHORIZATION": "Token " + token2}
# Create boards
created_boards = []
number_of_boards = 5
for n in range(number_of_boards):
name = "Board Name" + str(n)
(response, created_board,
created_column) = self.create_board(name, headers=headers)
created_boards.append((response, created_board))
name = "Board Name Other" + str(n)
(response, created_board,
created_column) = self.create_board(name, headers=headers2)
response = self.client.get(reverse("tickets:boards"), **headers)
boards = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(boards), 5)
for n in range(number_of_boards):
self.assertIn(created_boards[n][1], boards)
# TICKETS
def test_create_ticket(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
# Create board
name = "Board Name for tickets"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
self.assertEqual(response.status_code, 201)
columnid = created_column["id"]
# Create ticket
payload = {
"title": "Ticket Title",
"description": "Ticket Description",
"column": columnid,
}
response = self.client.post(reverse("tickets:tickets"),
json.dumps(payload),
content_type="application/json",
**headers)
ticket = json.loads(response.content)
ticketid = ticket["id"]
self.assertEqual(response.status_code, 201)
self.assertEqual(ticket["title"], payload["title"])
self.assertEqual(ticket["description"], payload["description"])
# Get Ticket
response = self.client.get(
reverse("tickets:ticket", kwargs={"pk": ticketid}), **headers)
ticket = json.loads(response.content)
self.assertEqual(ticket["title"], payload["title"])
self.assertEqual(ticket["description"], payload["description"])
def test_create_ticket_wrong_board(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
username2 = "Tom--"
password2 = "123"
(response, created_user) = self.create_user(username2, password2)
(response, data) = self.get_token(username2, password2)
token2 = data["token"]
headers2 = {"HTTP_AUTHORIZATION": "Token " + token2}
# Create board
name = "Board Name for tickets"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
self.assertEqual(response.status_code, 201)
# Create ticket
payload = {
"title": "Ticket Title",
"description": "Ticket Description",
"column": created_column["id"],
}
response = self.client.post(reverse("tickets:tickets"),
json.dumps(payload),
content_type="application/json",
**headers2)
error = json.loads(response.content)
self.assertEqual(response.status_code, 401)
self.assertEqual(error["error"], "You don't have access to the board")
def test_create_and_ticket_list(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
# Get token
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
# Create board
name = "Board Name for tickets"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
self.assertEqual(response.status_code, 201)
boardid = created_board["id"]
# Create ticket
created_tickets = []
number_of_tickets = 5
for n in range(number_of_tickets):
payload = {
"title": "Ticket Title" + str(n),
"description": "Ticket Description" + str(n),
"column": created_column["id"],
}
response = self.client.post(reverse("tickets:tickets"),
json.dumps(payload),
content_type="application/json",
**headers)
ticket_created = json.loads(response.content)
created_tickets.append((response, ticket_created))
url = reverse("tickets:tickets") + "?boardid=" + str(boardid)
response = self.client.get(url, **headers)
tickets = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(tickets), number_of_tickets)
for n in range(number_of_tickets):
self.assertIn(created_tickets[n][1], tickets)
def test_update_ticket(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
# Create board
name = "Board Name for tickets"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
# Create ticket
payload = {
"title": "Ticket Title",
"description": "Ticket Description",
"column": created_column["id"],
}
response = self.client.post(reverse("tickets:tickets"),
json.dumps(payload),
content_type="application/json",
**headers)
ticket = json.loads(response.content)
ticketid = ticket["id"]
# Update Ticket
newTitle = "New title"
newDescription = "New description"
payload = {
"title": newTitle,
"description": newDescription,
}
response = self.client.patch(reverse("tickets:ticket",
kwargs={"pk": ticketid}),
payload,
content_type="application/json",
**headers)
ticket = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(ticket["title"], payload["title"])
self.assertEqual(ticket["description"], payload["description"])
# Get Ticket
response = self.client.get(
reverse("tickets:ticket", kwargs={"pk": ticketid}), **headers)
ticket = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(ticket["title"], payload["title"])
self.assertEqual(ticket["description"], payload["description"])
def test_delete_ticket(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
# Create board
name = "Board Name for tickets"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
# Create ticket
payload = {
"title": "Ticket Title",
"description": "Ticket Description",
"column": created_column["id"],
}
response = self.client.post(reverse("tickets:tickets"),
json.dumps(payload),
content_type="application/json",
**headers)
ticket = json.loads(response.content)
ticketid = ticket["id"]
# Delete Ticket
response = self.client.delete(
reverse("tickets:ticket", kwargs={"pk": ticketid}), **headers)
ticket = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(ticket["title"], payload["title"])
self.assertEqual(ticket["description"], payload["description"])
# Get Ticket
response = self.client.get(
reverse("tickets:ticket", kwargs={"pk": ticketid}), **headers)
ticket = json.loads(response.content)
self.assertEqual(response.status_code, 404)
def test_column_create(self):
# Create user
username = "Tom"
password = "123"
(response, created_user) = self.create_user(username, password)
(response, data) = self.get_token(username, password)
token = data["token"]
headers = {"HTTP_AUTHORIZATION": "Token " + token}
# Create board
name = "Board Name for tickets"
(response, created_board,
created_column) = self.create_board(name, headers=headers)
boardid = created_board["id"]
payload = {
"title": "In progress",
}
# Create column
url = reverse("tickets:board_columns", kwargs={"pk": boardid})
response = self.client.post(url,
json.dumps(payload),
content_type="application/json",
**headers)
self.assertEqual(response.status_code, 201)
# Get column
response = self.client.get(
reverse("tickets:board_columns", kwargs={"pk": boardid}),
**headers)
columns = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(columns[1]["title"], payload["title"])
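# ---------------------------------------------------------------------------
# These tests use Django's standard TestCase/Client stack, so they can be
# run with the usual management command (assuming the app is registered as
# "tickets" in INSTALLED_APPS):
#
#     python manage.py test tickets
#
# TestCase wraps each test in a transaction that is rolled back afterwards,
# which is why e.g. test_get_users can assert that exactly one user exists.
# ---------------------------------------------------------------------------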
| 39.374768
| 78
| 0.569618
| 2,088
| 21,223
| 5.654693
| 0.068966
| 0.064792
| 0.037944
| 0.054036
| 0.823579
| 0.802829
| 0.772847
| 0.726603
| 0.692301
| 0.673245
| 0
| 0.01596
| 0.315083
| 21,223
| 538
| 79
| 39.447955
| 0.796299
| 0.046648
| 0
| 0.714623
| 0
| 0
| 0.103665
| 0.004273
| 0
| 0
| 0
| 0
| 0.134434
| 1
| 0.066038
| false
| 0.167453
| 0.007075
| 0
| 0.089623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
c9233b0f41114fc18617e0e6148267e3cc4b9e7e
| 64
|
py
|
Python
|
The Core/01 - addTwoDigits.py
|
lucasalme1da/codesignal
|
faff1ae635d04a33a1b59e6f751d266fabca5e71
|
[
"MIT"
] | 2
|
2020-04-15T00:15:03.000Z
|
2021-02-17T18:43:08.000Z
|
The Core/01 - addTwoDigits.py
|
lucasalme1da/codesignal
|
faff1ae635d04a33a1b59e6f751d266fabca5e71
|
[
"MIT"
] | null | null | null |
The Core/01 - addTwoDigits.py
|
lucasalme1da/codesignal
|
faff1ae635d04a33a1b59e6f751d266fabca5e71
|
[
"MIT"
] | null | null | null |
def addTwoDigits(n):
return int(str(n)[0]) + int(str(n)[1])
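# Usage sketch: for a two-digit n, the string indexing above is equivalent
# to plain integer arithmetic.
#     addTwoDigits(29)      # -> 11
#     29 // 10 + 29 % 10    # -> 11, same result without the str round-trip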
| 21.333333
| 42
| 0.59375
| 12
| 64
| 3.166667
| 0.666667
| 0.315789
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.15625
| 64
| 2
| 43
| 32
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
c9287aead3165b47fba22a664aab50ec4fa3458e
| 7,786
|
py
|
Python
|
Scripts/Examples/FL6-Font-Drop Anchors.py
|
twardoch/TypeRig
|
121838d98ed41160dbebf575d0a5623def0ce256
|
[
"BSD-3-Clause"
] | 1
|
2020-07-11T06:18:49.000Z
|
2020-07-11T06:18:49.000Z
|
Scripts/Examples/FL6-Font-Drop Anchors.py
|
twardoch/TypeRig
|
121838d98ed41160dbebf575d0a5623def0ce256
|
[
"BSD-3-Clause"
] | null | null | null |
Scripts/Examples/FL6-Font-Drop Anchors.py
|
twardoch/TypeRig
|
121838d98ed41160dbebf575d0a5623def0ce256
|
[
"BSD-3-Clause"
] | null | null | null |
#FLM: Font: Auto Anchors Drop
# VER: 1.0
#----------------------------------
# Foundry: Borges Type
# Typeface: Future Tense
# Date: 22.10.2018
#----------------------------------
# - Dependencies
import fontlab as fl6
from typerig.proxy import pFont
from typerig.glyph import eGlyph
# - Init ------------------------------------------------
font = pFont()
clear_anchors = True
work_layer = None
# -- Diacritic creation pattern (config dictionary)
diac_cfg_all = {
'A':[('top', work_layer, (0, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'C':[('top', work_layer, (0, 800), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'D':[('top', work_layer, (0, 800), ('A', None), 5, False, False) ],
'E':[('top', work_layer, (0, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'G':[('top', work_layer, (0, 800), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'H':[('top', work_layer, (0, 800), ('AT', None), 5, False, False) ],
'I':[('top', work_layer, (0, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'J':[('top', work_layer, (0, 800), ('AT', None), 5, False, False) ],
'K':[('top', work_layer, (-30, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (-10, 0), ('C', None), 5, False, False)],
'L':[('top', work_layer, (0, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'N':[('top', work_layer, (0, 800), ('A', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'O':[('top', work_layer, (0, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'R':[('top', work_layer, (0, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'S':[('top', work_layer, (0, 800), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'T':[('top', work_layer, (0, 800), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'U':[('top', work_layer, (0, 800), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'W':[('top', work_layer, (0, 800), ('A', None), 5, False, False) ],
'Y':[('top', work_layer, (0, 800), ('C', None), 5, False, False) ],
'Z':[('top', work_layer, (0, 800), ('AT', None), 5, False, False) ],
'a':[('top', work_layer, (0, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'c':[('top', work_layer, (0, 695), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'd':[('top', work_layer, (0, 695), ('A', None), 5, False, False) ],
'e':[('top', work_layer, (0, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'g':[('top', work_layer, (0, 695), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'h':[('top', work_layer, (0, 695), ('AT', None), 5, False, False) ],
'i':[('top', work_layer, (0, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'j':[('top', work_layer, (0, 695), ('AT', None), 5, False, False) ],
'k':[('top', work_layer, (-20, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (-10, 0), ('C', None), 5, False, False)],
'l':[('top', work_layer, (0, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'n':[('top', work_layer, (0, 695), ('A', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'o':[('top', work_layer, (0, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'r':[('top', work_layer, (0, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
's':[('top', work_layer, (0, 695), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
't':[('top', work_layer, (0, 695), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'u':[('top', work_layer, (0, 695), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'w':[('top', work_layer, (0, 695), ('A', None), 5, False, False) ],
'y':[('top', work_layer, (0, 695), ('C', None), 5, False, False) ],
'z':[('top', work_layer, (0, 695), ('AT', None), 5, False, False) ]
}
diac_cfg_smcp = {
'a':[('top', work_layer, (0, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'c':[('top', work_layer, (0, 625), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'd':[('top', work_layer, (0, 625), ('A', None), 5, False, False) ],
'e':[('top', work_layer, (0, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'g':[('top', work_layer, (0, 625), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'h':[('top', work_layer, (0, 625), ('AT', None), 5, False, False) ],
'i':[('top', work_layer, (0, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('R', None), 5, False, False)],
'j':[('top', work_layer, (0, 625), ('AT', None), 5, False, False) ],
'k':[('top', work_layer, (-20, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (-10, 0), ('C', None), 5, False, False)],
'l':[('top', work_layer, (0, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'n':[('top', work_layer, (0, 625), ('A', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'o':[('top', work_layer, (0, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'r':[('top', work_layer, (0, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
's':[('top', work_layer, (0, 625), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
't':[('top', work_layer, (0, 625), ('AT', None), 5, False, False), ('bottom', work_layer, (0, 0), ('A', None), 5, False, False)],
'u':[('top', work_layer, (0, 625), ('C', None), 5, False, False), ('bottom', work_layer, (0, 0), ('C', None), 5, False, False)],
'w':[('top', work_layer, (0, 625), ('A', None), 5, False, False) ],
'y':[('top', work_layer, (0, 625), ('C', None), 5, False, False) ],
'z':[('top', work_layer, (0, 625), ('AT', None), 5, False, False) ]
}
# - Procedures ---------------------------------------------
def dropAnchors(glyph, control):
# - Init
work_name = glyph.name.split('.')[0]
# - Process
if work_name in control.keys():
work_glyph = eGlyph(font.fg, glyph)
#work_glyph.clearAnchors(work_layer)
for ctr_tuple in control[work_name]:
work_glyph.dropAnchor(*ctr_tuple)
work_glyph.update()
work_glyph.updateObject(work_glyph.fl, 'Drop anchors: %s.' %work_glyph.name)
# - Process ------------------------------------------------
for glyph in font.glyphs():
if 'smcp' not in glyph.name:
dropAnchors(glyph, diac_cfg_all)
if 'smcp' in glyph.name:
dropAnchors(glyph, diac_cfg_smcp)
# - Finish
font.update()
print('DONE.')
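# ---------------------------------------------------------------------------
# A standalone sketch of the dispatch logic in dropAnchors(), runnable
# without FontLab. Each config value is a list of argument tuples that is
# splatted into the dropAnchor(...) call via *ctr_tuple; the exact dropAnchor
# signature comes from typerig and is assumed here, not shown.
# ---------------------------------------------------------------------------
demo_cfg = {'A': [('top', None, (0, 800), ('AT', None), 5, False, False)]}
for demo_name in ('A.smcp', 'B'):
    demo_base = demo_name.split('.')[0]  # strip style suffixes like .smcp
    for demo_tuple in demo_cfg.get(demo_base, []):
        print('%s: would drop anchor %r' % (demo_name, demo_tuple[0]))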
| 68.902655
| 137
| 0.514642
| 1,159
| 7,786
| 3.354616
| 0.083693
| 0.226852
| 0.246914
| 0.37037
| 0.841821
| 0.841049
| 0.841049
| 0.82356
| 0.82356
| 0.817901
| 0
| 0.065116
| 0.17159
| 7,786
| 113
| 138
| 68.902655
| 0.537674
| 0.060108
| 0
| 0
| 0
| 0
| 0.08517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.036585
| null | null | 0.012195
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c93a91a3b01d7e3dd65d9c4574395812a29410c8
| 230
|
py
|
Python
|
pants-plugins/experimental/mypyc/register.py
|
lilatomic/pants-plugins
|
0757fcf52325fa5809211f7a7a25081a134333a5
|
[
"Apache-2.0"
] | 4
|
2022-02-14T23:14:21.000Z
|
2022-03-29T12:39:26.000Z
|
pants-plugins/experimental/mypyc/register.py
|
lilatomic/pants-plugins
|
0757fcf52325fa5809211f7a7a25081a134333a5
|
[
"Apache-2.0"
] | 36
|
2022-02-02T05:01:04.000Z
|
2022-03-31T16:46:34.000Z
|
pants-plugins/experimental/mypyc/register.py
|
lilatomic/pants-plugins
|
0757fcf52325fa5809211f7a7a25081a134333a5
|
[
"Apache-2.0"
] | 2
|
2022-02-14T04:16:19.000Z
|
2022-03-02T11:22:37.000Z
|
from experimental.mypyc.rules import rules as mypyc_rules
from experimental.mypyc.target_types import MyPycPythonDistribution
def rules():
return (*mypyc_rules(),)
def target_types():
return (MyPycPythonDistribution,)
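# ---------------------------------------------------------------------------
# Pants discovers rules()/target_types() through backend registration in
# pants.toml; a sketch of the expected wiring (paths assumed from this
# repo's layout):
#
#     [GLOBAL]
#     pythonpath = ["%(buildroot)s/pants-plugins"]
#     backend_packages = ["experimental.mypyc"]
# ---------------------------------------------------------------------------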
| 20.909091
| 67
| 0.786957
| 26
| 230
| 6.807692
| 0.423077
| 0.169492
| 0.237288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 230
| 10
| 68
| 23
| 0.885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
a31cf33d8480498361f7aa3e7174b19a227c9765
| 1,327
|
py
|
Python
|
main.py
|
statsu1990/kaggle_ion_switching
|
487025284cdfc79741a744e73f77b4bf86490e30
|
[
"MIT"
] | null | null | null |
main.py
|
statsu1990/kaggle_ion_switching
|
487025284cdfc79741a744e73f77b4bf86490e30
|
[
"MIT"
] | null | null | null |
main.py
|
statsu1990/kaggle_ion_switching
|
487025284cdfc79741a744e73f77b4bf86490e30
|
[
"MIT"
] | null | null | null |
import make_model as mm
#mm.Model_v5_0_0().train_model()
#mm.Model_v5_0_1().train_model()
#mm.Model_v5_0_2().train_model()
#mm.Model_v5_0_3().train_model()
#mm.Model_v5_0_4().train_model()
#mm.Model_v5_0_5().train_model()
#mm.Model_v5_0_11().train_model()
#mm.Model_v5_0_10().train_model()
#mm.Model_v5_0_9().train_model()
#mm.Model_v5_0_8().train_model()
#mm.Model_v5_0_8_1().pred_test()
#mm.Model_v5_0_7().train_model()
#mm.Model_v5_0_6().train_model()
#mm.Model_v5_0_5_1().train_model()
#mm.Model_v5_0_5_2().train_model()
#mm.Model_v5_0_5_3().train_model()
#mm.Model_v5_0_5_4().train_model()
#mm.Model_v5_0_5_5().train_model()
#mm.Model_v5_0_5_6().train_model()
#mm.Model_v5_0_5_7().train_model()
#mm.Model_v5_0_5_8().train_model()
#mm.Model_v5_0_5_9().train_model()
#mm.Model_v5_0_12().train_model()
#mm.Model_v5_1_0().train_model()
#mm.Model_v5_1_1().train_model()
#mm.Model_v5_2_0_0().train_model()
#mm.Model_v5_2_0_1().train_model()
#mm.Model_v5_3_0().train_model()
#mm.Model_v5_3_1().train_model()
#mm.Model_v5_3_2().train_model()
#mm.Model_v6_0_0().train_model()
#mm.Model_v6_0_1().train_model()
#mm.Model_v6_0_2().train_model()
#mm.Model_v6_0_3().train_model()
#mm.Model_v6_1_0().train_model()
mm.Model_v6_1_1().train_model()
#mm.Model_v6_1_2().train_model()
| 27.081633
| 35
| 0.730972
| 276
| 1,327
| 2.931159
| 0.083333
| 0.320148
| 0.519159
| 0.735476
| 0.92089
| 0.92089
| 0.663782
| 0.108776
| 0
| 0
| 0
| 0.102606
| 0.074604
| 1,327
| 48
| 36
| 27.645833
| 0.556189
| 0.859834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
a33ea9a2e20e6c1aff15ddddf949d51f44be754e
| 16,088
|
py
|
Python
|
scripts/options.py
|
edwardoughton/ictp4d
|
0e36b3c4515e57cc9210bd22f2ab761f2aa750d6
|
[
"MIT"
] | 4
|
2021-02-07T19:36:57.000Z
|
2021-05-20T16:46:02.000Z
|
scripts/options.py
|
edwardoughton/ictp4d
|
0e36b3c4515e57cc9210bd22f2ab761f2aa750d6
|
[
"MIT"
] | null | null | null |
scripts/options.py
|
edwardoughton/ictp4d
|
0e36b3c4515e57cc9210bd22f2ab761f2aa750d6
|
[
"MIT"
] | null | null | null |
"""
Options consisting of scenarios and strategies.
Country parameters consist of those parameters which are specific
to each country.
Written by Ed Oughton
January 2020
The strategy is defined based on generation_core_backhaul_sharing_networks_spectrum_tax
generation: technology generation, so 3G or 4G
core: type of core data transport network, e.g. evolved packet core (4G)
backhaul: type of backhaul, so fiber or wireless
sharing: the type of infrastructure sharing, active, passive etc.
network: relates to the number of networks, as defined in country parameters
spectrum: type of spectrum strategy, so baseline, high or low
tax: type of taxation strategy, so baseline, high or low
integration: option to undertake regional integration
"""
OPTIONS = {
'technology_options': [
{
'scenario': 'low_10_10_10',
'strategy': '3G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_10_10_10',
'strategy': '3G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_10_10_10',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_10_10_10',
'strategy': '4G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '3G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '3G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '4G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '3G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '3G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '4G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '3G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '3G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '4G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '3G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '3G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '4G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '3G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '3G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '4G_epc_fiber_baseline_baseline_baseline_baseline_baseline',
},
],
'business_model_options': [
{
'scenario': 'low_10_10_10',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_10_10_10',
'strategy': '4G_epc_wireless_psb_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_10_10_10',
'strategy': '4G_epc_wireless_moran_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_10_10_10',
'strategy': '4G_epc_wireless_srn_srn_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '4G_epc_wireless_psb_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '4G_epc_wireless_moran_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_10_10_10',
'strategy': '4G_epc_wireless_srn_srn_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '4G_epc_wireless_psb_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '4G_epc_wireless_moran_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_10_10_10',
'strategy': '4G_epc_wireless_srn_srn_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '4G_epc_wireless_psb_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '4G_epc_wireless_moran_baseline_baseline_baseline_baseline',
},
{
'scenario': 'low_2_2_2',
'strategy': '4G_epc_wireless_srn_srn_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '4G_epc_wireless_psb_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '4G_epc_wireless_moran_baseline_baseline_baseline_baseline',
},
{
'scenario': 'baseline_2_2_2',
'strategy': '4G_epc_wireless_srn_srn_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '4G_epc_wireless_baseline_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '4G_epc_wireless_psb_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '4G_epc_wireless_moran_baseline_baseline_baseline_baseline',
},
{
'scenario': 'high_2_2_2',
'strategy': '4G_epc_wireless_srn_srn_baseline_baseline_baseline',
},
],
}
COUNTRY_PARAMETERS = {
'CIV': {
'luminosity': {
'high': 5,
'medium': 1,
},
'arpu': {
'high': 8,
'medium': 6,
'low': 2,
},
'networks': {
'baseline_urban': 3,
'baseline_suburban': 3,
'baseline_rural': 3,
'srn_urban': 3,
'srn_suburban': 3,
'srn_rural': 1,
},
'frequencies': {
'3G': [
{
'frequency': 1800,
'bandwidth': '2x10',
},
{
'frequency': 2100,
'bandwidth': '2x10',
},
],
'4G': [
{
'frequency': 800,
'bandwidth': '2x10',
},
{
'frequency': 1800,
'bandwidth': '2x10',
},
],
},
'financials': {
'wacc': 15,
'profit_margin': 10,
'spectrum_coverage_baseline_usd_mhz_pop': 0.04,
'spectrum_capacity_baseline_usd_mhz_pop': 0.03,
'tax_low': 10,
'tax_baseline': 25,
'tax_high': 40,
'administration_percentage_of_network_cost': 10,
},
},
'MLI': {
'luminosity': {
'high': 5,
'medium': 1,
},
'arpu': {
'high': 8,
'medium': 6,
'low': 2,
},
'networks': {
'baseline_urban': 2,
'baseline_suburban': 2,
'baseline_rural': 2,
'srn_urban': 2,
'srn_suburban': 2,
'srn_rural': 1,
},
'frequencies': {
'3G': [
{
'frequency': 1800,
'bandwidth': '2x10',
},
{
'frequency': 2100,
'bandwidth': '2x10',
},
],
'4G': [
{
'frequency': 700,
'bandwidth': '2x10',
},
{
'frequency': 1800,
'bandwidth': '2x10',
},
],
},
'financials': {
'wacc': 15,
'profit_margin': 10,
'spectrum_coverage_baseline_usd_mhz_pop': 0.04,
'spectrum_capacity_baseline_usd_mhz_pop': 0.03,
'tax_low': 10,
'tax_baseline': 30,
'tax_high': 40,
'administration_percentage_of_network_cost': 10,
},
},
'SEN': {
'luminosity': {
'high': 5,
'medium': 1,
},
'arpu': {
'high': 8,
'medium': 6,
'low': 2,
},
'networks': {
'baseline_urban': 3,
'baseline_suburban': 3,
'baseline_rural': 3,
'srn_urban': 3,
'srn_suburban': 3,
'srn_rural': 1,
},
'frequencies': {
'3G': [
{
'frequency': 1800,
'bandwidth': '2x10',
},
{
'frequency': 2100,
'bandwidth': '2x10',
},
],
'4G': [
{
'frequency': 800,
'bandwidth': '2x10',
},
{
'frequency': 1800,
'bandwidth': '2x10',
},
],
},
'financials': {
'wacc': 15,
'profit_margin': 10,
'spectrum_coverage_baseline_usd_mhz_pop': 0.04,
'spectrum_capacity_baseline_usd_mhz_pop': 0.03,
'tax_low': 10,
'tax_baseline': 30,
'tax_high': 40,
'administration_percentage_of_network_cost': 10,
},
},
'KEN': {
'luminosity': {
'high': 5,
'medium': 1,
},
'arpu': {
'high': 8,
'medium': 6,
'low': 2,
},
'networks': {
'baseline_urban': 3,
'baseline_suburban': 3,
'baseline_rural': 3,
'srn_urban': 3,
'srn_suburban': 3,
'srn_rural': 1,
},
'frequencies': {
'3G': [
{
'frequency': 1800,
'bandwidth': '2x10',
},
{
'frequency': 2100,
'bandwidth': '2x10',
},
],
'4G': [
{
'frequency': 700,
'bandwidth': '2x10',
},
{
'frequency': 800,
'bandwidth': '2x10',
},
],
},
'financials': {
'wacc': 15,
'profit_margin': 10,
'spectrum_coverage_baseline_usd_mhz_pop': 0.1,
'spectrum_capacity_baseline_usd_mhz_pop': 0.08,
'tax_low': 10,
'tax_baseline': 30,
'tax_high': 40,
'administration_percentage_of_network_cost': 10,
},
},
'TZA': {
'luminosity': {
'high': 5,
'medium': 1,
},
'arpu': {
'high': 8,
'medium': 3,
'low': 2,
},
'networks': {
'baseline_urban': 3,
'baseline_suburban': 3,
'baseline_rural': 3,
'srn_urban': 3,
'srn_suburban': 3,
'srn_rural': 1,
},
'frequencies': {
'3G': [
{
'frequency': 1800,
'bandwidth': '2x10',
},
{
'frequency': 2100,
'bandwidth': '2x10',
},
],
'4G': [
{
'frequency': 700,
'bandwidth': '2x10',
},
{
'frequency': 1800,
'bandwidth': '2x10',
},
],
},
'financials': {
'wacc': 15,
'profit_margin': 10,
'spectrum_coverage_baseline_usd_mhz_pop': 0.1,
'spectrum_capacity_baseline_usd_mhz_pop': 0.08,
'tax_low': 10,
'tax_baseline': 30,
'tax_high': 40,
'administration_percentage_of_network_cost': 10,
},
},
'UGA': {
'luminosity': {
'high': 5,
'medium': 1,
},
'arpu': {
'high': 8,
'medium': 3,
'low': 2,
},
'networks': {
'baseline_urban': 3,
'baseline_suburban': 3,
'baseline_rural': 3,
'srn_urban': 3,
'srn_suburban': 3,
'srn_rural': 1,
},
'frequencies': {
'3G': [
{
'frequency': 1800,
'bandwidth': '2x10',
},
{
'frequency': 2100,
'bandwidth': '2x10',
},
],
'4G': [
{
'frequency': 800,
'bandwidth': '2x10',
},
{
'frequency': 1800,
'bandwidth': '2x10',
},
],
},
'financials': {
'wacc': 15,
'profit_margin': 10,
'spectrum_coverage_baseline_usd_mhz_pop': 0.1,
'spectrum_capacity_baseline_usd_mhz_pop': 0.08,
'tax_low': 10,
'tax_baseline': 30,
'tax_high': 40,
'administration_percentage_of_network_cost': 10,
},
},
}
| 30.240602
| 87
| 0.461027
| 1,318
| 16,088
| 5.146434
| 0.08953
| 0.396285
| 0.424591
| 0.339673
| 0.901371
| 0.901371
| 0.89341
| 0.891346
| 0.889724
| 0.882648
| 0
| 0.066451
| 0.424727
| 16,088
| 531
| 88
| 30.297552
| 0.666451
| 0.045997
| 0
| 0.608696
| 0
| 0
| 0.425451
| 0.225569
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a346661106836df18572fe6396ffa0e8a37e0707
| 69
|
py
|
Python
|
server/ferret/pnw/export/pnw.py
|
Lyrositor/pnw-ferret
|
3eacd2f56e9811c1ccc9c5dafdcb4738ca767193
|
[
"CC0-1.0"
] | 2
|
2019-11-02T22:40:13.000Z
|
2019-11-07T23:02:35.000Z
|
server/ferret/pnw/export/pnw.py
|
Lyrositor/pnw-ferret
|
3eacd2f56e9811c1ccc9c5dafdcb4738ca767193
|
[
"CC0-1.0"
] | null | null | null |
server/ferret/pnw/export/pnw.py
|
Lyrositor/pnw-ferret
|
3eacd2f56e9811c1ccc9c5dafdcb4738ca767193
|
[
"CC0-1.0"
] | null | null | null |
from ferret.pnw.constants import *
from ferret.pnw.formulas import *
| 23
| 34
| 0.797101
| 10
| 69
| 5.5
| 0.6
| 0.363636
| 0.472727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 35
| 34.5
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a36d0ec395c69e356042f2685212ab64465c76e1
| 47,662
|
py
|
Python
|
report/customer_vendor_statement.py
|
alconor/partner_statement
|
4aead85436a065b3d8608aef06d8ec7a9169e973
|
[
"Unlicense"
] | null | null | null |
report/customer_vendor_statement.py
|
alconor/partner_statement
|
4aead85436a065b3d8608aef06d8ec7a9169e973
|
[
"Unlicense"
] | null | null | null |
report/customer_vendor_statement.py
|
alconor/partner_statement
|
4aead85436a065b3d8608aef06d8ec7a9169e973
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp import api, fields, models
class CustomerVendorStatement(models.AbstractModel):
"""Model of Customer Activity Statement"""
_name = 'report.customer_vendor_statement.statement'
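# This report builds three data sets per partner, each via chained SQL CTEs
# (Q1..Q4) executed against account_move_line:
#   * an initial balance as of date_start,
#   * the statement lines between date_start and date_end, and
#   * optional aging buckets (current, 1-30, 30-60, 60-90, 90-120, 120+ days).
# Each query exists in three variants: receivable, payable, and combined.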
def _format_date_to_partner_lang(self, str_date, partner_id):
lang_code = self.env['res.partner'].browse(partner_id).lang
lang_id = self.env['res.lang']._lang_get(lang_code)
lang = self.env['res.lang'].browse(lang_id)
date = datetime.strptime(str_date, DEFAULT_SERVER_DATE_FORMAT).date()
return date.strftime(lang.date_format)  # use the browsed res.lang record, not its id
def _initial_balance_sql_q1(self, partners, date_start):
return """
SELECT l.partner_id, l.currency_id, l.company_id,
CASE WHEN l.currency_id is not null AND l.amount_currency > 0.0
THEN sum(l.amount_currency)
ELSE sum(l.debit)
END as debit,
CASE WHEN l.currency_id is not null AND l.amount_currency < 0.0
THEN sum(l.amount_currency * (-1))
ELSE sum(l.credit)
END as credit
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
WHERE l.partner_id IN (%s) AND at.type = 'receivable'
AND l.date <= '%s' AND not l.blocked
GROUP BY l.partner_id, l.currency_id, l.amount_currency,
l.company_id
""" % (partners, date_start)
def _initial_balance_sql_q1_payable(self, partners, date_start):
return """
SELECT l.partner_id, l.currency_id, l.company_id,
CASE WHEN l.currency_id is not null AND l.amount_currency > 0.0
THEN sum(l.amount_currency)
ELSE sum(l.debit)
END as debit,
CASE WHEN l.currency_id is not null AND l.amount_currency < 0.0
THEN sum(l.amount_currency * (-1))
ELSE sum(l.credit)
END as credit
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
WHERE l.partner_id IN (%s) AND at.type = 'payable'
AND l.date <= '%s' AND not l.blocked
GROUP BY l.partner_id, l.currency_id, l.amount_currency,
l.company_id
""" % (partners, date_start)
def _initial_balance_sql_q1_receivable_and_payable(self, partners, date_start):
return """
SELECT l.partner_id, l.currency_id, l.company_id,
CASE WHEN l.currency_id is not null AND l.amount_currency > 0.0
THEN sum(l.amount_currency)
ELSE sum(l.debit)
END as debit,
CASE WHEN l.currency_id is not null AND l.amount_currency < 0.0
THEN sum(l.amount_currency * (-1))
ELSE sum(l.credit)
END as credit
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
WHERE l.partner_id IN (%s) AND (at.type = 'payable' OR at.type = 'receivable')
AND l.date <= '%s' AND not l.blocked
GROUP BY l.partner_id, l.currency_id, l.amount_currency,
l.company_id
""" % (partners, date_start)
def _initial_balance_sql_q2(self, company_id):
return """
SELECT Q1.partner_id, debit-credit AS balance,
COALESCE(Q1.currency_id, c.currency_id) AS currency_id
FROM Q1
JOIN res_company c ON (c.id = Q1.company_id)
WHERE c.id = %s
""" % company_id
def _initial_balance_sql_q2_payable(self, company_id):
return """
SELECT Q1.partner_id, debit-credit AS balance,
COALESCE(Q1.currency_id, c.currency_id) AS currency_id
FROM Q1
JOIN res_company c ON (c.id = Q1.company_id)
WHERE c.id = %s
""" % company_id
def _initial_balance_sql_q2_receivable_and_payable(self, company_id):
return """
SELECT Q1.partner_id, debit-credit AS balance,
COALESCE(Q1.currency_id, c.currency_id) AS currency_id
FROM Q1
JOIN res_company c ON (c.id = Q1.company_id)
WHERE c.id = %s
""" % company_id
def _get_account_initial_balance(self, company_id, partner_ids,
date_start):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_start = datetime.strptime(
date_start, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s)
SELECT partner_id, currency_id, balance
FROM Q2""" % (self._initial_balance_sql_q1(partners, date_start),
self._initial_balance_sql_q2(company_id)))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _get_account_initial_balance_payable(self, company_id, partner_ids,
date_start):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_start = datetime.strptime(
date_start, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s)
SELECT partner_id, currency_id, balance
FROM Q2""" % (self._initial_balance_sql_q1_payable(partners, date_start),
self._initial_balance_sql_q2_payable(company_id)))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _get_account_initial_balance_receivable_and_payable(self, company_id, partner_ids,
date_start):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_start = datetime.strptime(
date_start, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s)
SELECT partner_id, currency_id, balance
FROM Q2""" % (self._initial_balance_sql_q1_receivable_and_payable(partners, date_start),
self._initial_balance_sql_q2_receivable_and_payable(company_id)))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _display_lines_sql_q1(self, partners, date_start, date_end):
return """
SELECT m.name AS move_id, l.partner_id, l.date, l.name,
l.ref, l.blocked, l.currency_id, l.company_id,
CASE WHEN (l.currency_id is not null AND l.amount_currency > 0.0)
THEN sum(l.amount_currency)
ELSE sum(l.debit)
END as debit,
CASE WHEN (l.currency_id is not null AND l.amount_currency < 0.0)
THEN sum(l.amount_currency * (-1))
ELSE sum(l.credit)
END as credit,
CASE WHEN l.date_maturity is null
THEN l.date
ELSE l.date_maturity
END as date_maturity
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
WHERE l.partner_id IN (%s) AND at.type = 'receivable'
AND '%s' < l.date AND l.date <= '%s'
GROUP BY l.partner_id, m.name, l.date, l.date_maturity, l.name,
l.ref, l.blocked, l.currency_id,
l.amount_currency, l.company_id
""" % (partners, date_start, date_end)
def _display_lines_sql_q1_payable(self, partners, date_start, date_end):
return """
SELECT m.name AS move_id, l.partner_id, l.date, l.name,
l.ref, l.blocked, l.currency_id, l.company_id,
CASE WHEN (l.currency_id is not null AND l.amount_currency > 0.0)
THEN sum(l.amount_currency)
ELSE sum(l.debit)
END as debit,
CASE WHEN (l.currency_id is not null AND l.amount_currency < 0.0)
THEN sum(l.amount_currency * (-1))
ELSE sum(l.credit)
END as credit,
CASE WHEN l.date_maturity is null
THEN l.date
ELSE l.date_maturity
END as date_maturity
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
WHERE l.partner_id IN (%s) AND at.type = 'payable'
AND '%s' < l.date AND l.date <= '%s'
GROUP BY l.partner_id, m.name, l.date, l.date_maturity, l.name,
l.ref, l.blocked, l.currency_id,
l.amount_currency, l.company_id
""" % (partners, date_start, date_end)
def _display_lines_sql_q1_receivable_and_payable(self, partners, date_start, date_end):
return """
SELECT m.name AS move_id, l.partner_id, l.date, l.name,
l.ref, l.blocked, l.currency_id, l.company_id,
CASE WHEN (l.currency_id is not null AND l.amount_currency > 0.0)
THEN sum(l.amount_currency)
ELSE sum(l.debit)
END as debit,
CASE WHEN (l.currency_id is not null AND l.amount_currency < 0.0)
THEN sum(l.amount_currency * (-1))
ELSE sum(l.credit)
END as credit,
CASE WHEN l.date_maturity is null
THEN l.date
ELSE l.date_maturity
END as date_maturity
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
WHERE l.partner_id IN (%s) AND (at.type = 'payable' OR at.type = 'receivable')
AND '%s' < l.date AND l.date <= '%s'
GROUP BY l.partner_id, m.name, l.date, l.date_maturity, l.name,
l.ref, l.blocked, l.currency_id,
l.amount_currency, l.company_id
""" % (partners, date_start, date_end)
def _display_lines_sql_q2(self, company_id):
return """
SELECT Q1.partner_id, move_id, date, date_maturity, Q1.name, ref,
debit, credit, debit-credit as amount, blocked,
COALESCE(Q1.currency_id, c.currency_id) AS currency_id
FROM Q1
JOIN res_company c ON (c.id = Q1.company_id)
WHERE c.id = %s
""" % company_id
def _display_lines_sql_q2_payable(self, company_id):
return """
SELECT Q1.partner_id, move_id, date, date_maturity, Q1.name, ref,
debit, credit, debit-credit as amount, blocked,
COALESCE(Q1.currency_id, c.currency_id) AS currency_id
FROM Q1
JOIN res_company c ON (c.id = Q1.company_id)
WHERE c.id = %s
""" % company_id
def _display_lines_sql_q2_receivable_and_payable(self, company_id):
return """
SELECT Q1.partner_id, move_id, date, date_maturity, Q1.name, ref,
debit, credit, debit-credit as amount, blocked,
COALESCE(Q1.currency_id, c.currency_id) AS currency_id
FROM Q1
JOIN res_company c ON (c.id = Q1.company_id)
WHERE c.id = %s
""" % company_id
def _get_account_display_lines(self, company_id, partner_ids, date_start,
date_end):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_start = datetime.strptime(
date_start, DEFAULT_SERVER_DATE_FORMAT).date()
date_end = datetime.strptime(
date_end, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s)
SELECT partner_id, move_id, date, date_maturity, name, ref, debit,
credit, amount, blocked, currency_id
FROM Q2
ORDER BY date, date_maturity, move_id""" % (
self._display_lines_sql_q1(partners, date_start, date_end),
self._display_lines_sql_q2(company_id)))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _get_account_display_lines_payable(self, company_id, partner_ids, date_start,
date_end):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_start = datetime.strptime(
date_start, DEFAULT_SERVER_DATE_FORMAT).date()
date_end = datetime.strptime(
date_end, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s)
SELECT partner_id, move_id, date, date_maturity, name, ref, debit,
credit, amount, blocked, currency_id
FROM Q2
ORDER BY date, date_maturity, move_id""" % (
self._display_lines_sql_q1_payable(partners, date_start, date_end),
self._display_lines_sql_q2_payable(company_id)))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _get_account_display_lines_receivable_and_payable(self, company_id, partner_ids, date_start,
date_end):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_start = datetime.strptime(
date_start, DEFAULT_SERVER_DATE_FORMAT).date()
date_end = datetime.strptime(
date_end, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s)
SELECT partner_id, move_id, date, date_maturity, name, ref, debit,
credit, amount, blocked, currency_id
FROM Q2
ORDER BY date, date_maturity, move_id""" % (
self._display_lines_sql_q1_receivable_and_payable(partners, date_start, date_end),
self._display_lines_sql_q2_receivable_and_payable(company_id)))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _show_buckets_sql_q1(self, partners, date_end):
return """
SELECT l.partner_id, l.currency_id, l.company_id, l.move_id,
CASE WHEN l.balance > 0.0
THEN l.balance - sum(coalesce(pd.amount, 0.0))
ELSE l.balance + sum(coalesce(pc.amount, 0.0))
END AS open_due,
CASE WHEN l.balance > 0.0
THEN l.amount_currency - sum(coalesce(pd.amount_currency, 0.0))
ELSE l.amount_currency + sum(coalesce(pc.amount_currency, 0.0))
END AS open_due_currency,
CASE WHEN l.date_maturity is null
THEN l.date
ELSE l.date_maturity
END as date_maturity
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
LEFT JOIN (SELECT pr.*
FROM account_partial_reconcile pr
INNER JOIN account_move_line l2
ON pr.credit_move_id = l2.id
WHERE l2.date <= '%s'
) as pd ON pd.debit_move_id = l.id
LEFT JOIN (SELECT pr.*
FROM account_partial_reconcile pr
INNER JOIN account_move_line l2
ON pr.debit_move_id = l2.id
WHERE l2.date <= '%s'
) as pc ON pc.credit_move_id = l.id
WHERE l.partner_id IN (%s) AND at.type = 'receivable'
AND not l.reconciled AND not l.blocked
GROUP BY l.partner_id, l.currency_id, l.date, l.date_maturity,
l.amount_currency, l.balance, l.move_id,
l.company_id
""" % (date_end, date_end, partners)
def _show_buckets_sql_q1_payable(self, partners, date_end):
return """
SELECT l.partner_id, l.currency_id, l.company_id, l.move_id,
CASE WHEN l.balance > 0.0
THEN l.balance - sum(coalesce(pd.amount, 0.0))
ELSE l.balance + sum(coalesce(pc.amount, 0.0))
END AS open_due,
CASE WHEN l.balance > 0.0
THEN l.amount_currency - sum(coalesce(pd.amount_currency, 0.0))
ELSE l.amount_currency + sum(coalesce(pc.amount_currency, 0.0))
END AS open_due_currency,
CASE WHEN l.date_maturity is null
THEN l.date
ELSE l.date_maturity
END as date_maturity
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
LEFT JOIN (SELECT pr.*
FROM account_partial_reconcile pr
INNER JOIN account_move_line l2
ON pr.credit_move_id = l2.id
WHERE l2.date <= '%s'
) as pd ON pd.debit_move_id = l.id
LEFT JOIN (SELECT pr.*
FROM account_partial_reconcile pr
INNER JOIN account_move_line l2
ON pr.debit_move_id = l2.id
WHERE l2.date <= '%s'
) as pc ON pc.credit_move_id = l.id
WHERE l.partner_id IN (%s) AND at.type = 'payable'
AND not l.reconciled AND not l.blocked
GROUP BY l.partner_id, l.currency_id, l.date, l.date_maturity,
l.amount_currency, l.balance, l.move_id,
l.company_id
""" % (date_end, date_end, partners)
def _show_buckets_sql_q1_receivable_and_payable(self, partners, date_end):
return """
SELECT l.partner_id, l.currency_id, l.company_id, l.move_id,
CASE WHEN l.balance > 0.0
THEN l.balance - sum(coalesce(pd.amount, 0.0))
ELSE l.balance + sum(coalesce(pc.amount, 0.0))
END AS open_due,
CASE WHEN l.balance > 0.0
THEN l.amount_currency - sum(coalesce(pd.amount_currency, 0.0))
ELSE l.amount_currency + sum(coalesce(pc.amount_currency, 0.0))
END AS open_due_currency,
CASE WHEN l.date_maturity is null
THEN l.date
ELSE l.date_maturity
END as date_maturity
FROM account_move_line l
JOIN account_account_type at ON (at.id = l.user_type_id)
JOIN account_move m ON (l.move_id = m.id)
LEFT JOIN (SELECT pr.*
FROM account_partial_reconcile pr
INNER JOIN account_move_line l2
ON pr.credit_move_id = l2.id
WHERE l2.date <= '%s'
) as pd ON pd.debit_move_id = l.id
LEFT JOIN (SELECT pr.*
FROM account_partial_reconcile pr
INNER JOIN account_move_line l2
ON pr.debit_move_id = l2.id
WHERE l2.date <= '%s'
) as pc ON pc.credit_move_id = l.id
WHERE l.partner_id IN (%s) AND (at.type = 'payable' OR at.type = 'receivable')
AND not l.reconciled AND not l.blocked
GROUP BY l.partner_id, l.currency_id, l.date, l.date_maturity,
l.amount_currency, l.balance, l.move_id,
l.company_id
""" % (date_end, date_end, partners)
def _show_buckets_sql_q2(self, today, minus_30, minus_60, minus_90,
minus_120):
return """
SELECT partner_id, currency_id, date_maturity, open_due,
open_due_currency, move_id, company_id,
CASE
WHEN '%s' <= date_maturity AND currency_id is null
THEN open_due
WHEN '%s' <= date_maturity AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as current,
CASE
WHEN '%s' < date_maturity AND date_maturity < '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity < '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_1_30,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_30_60,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_60_90,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_90_120,
CASE
WHEN date_maturity <= '%s' AND currency_id is null
THEN open_due
WHEN date_maturity <= '%s' AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_over_120
FROM Q1
GROUP BY partner_id, currency_id, date_maturity, open_due,
open_due_currency, move_id, company_id
""" % (today, today, minus_30, today, minus_30, today, minus_60,
minus_30, minus_60, minus_30, minus_90, minus_60, minus_90,
minus_60, minus_120, minus_90, minus_120, minus_90, minus_120,
minus_120)
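# The CASE blocks above assign each open amount to exactly one aging bucket
# by comparing date_maturity against today and today minus 30/60/90/120 days,
# picking the company-currency or foreign-currency amount as appropriate.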
def _show_buckets_sql_q2_payable(self, today, minus_30, minus_60, minus_90,
minus_120):
return """
SELECT partner_id, currency_id, date_maturity, open_due,
open_due_currency, move_id, company_id,
CASE
WHEN '%s' <= date_maturity AND currency_id is null
THEN open_due
WHEN '%s' <= date_maturity AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as current,
CASE
WHEN '%s' < date_maturity AND date_maturity < '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity < '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_1_30,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_30_60,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_60_90,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_90_120,
CASE
WHEN date_maturity <= '%s' AND currency_id is null
THEN open_due
WHEN date_maturity <= '%s' AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_over_120
FROM Q1
GROUP BY partner_id, currency_id, date_maturity, open_due,
open_due_currency, move_id, company_id
""" % (today, today, minus_30, today, minus_30, today, minus_60,
minus_30, minus_60, minus_30, minus_90, minus_60, minus_90,
minus_60, minus_120, minus_90, minus_120, minus_90, minus_120,
minus_120)
def _show_buckets_sql_q2_receivable_and_payable(self, today, minus_30, minus_60, minus_90,
minus_120):
return """
SELECT partner_id, currency_id, date_maturity, open_due,
open_due_currency, move_id, company_id,
CASE
WHEN '%s' <= date_maturity AND currency_id is null
THEN open_due
WHEN '%s' <= date_maturity AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as current,
CASE
WHEN '%s' < date_maturity AND date_maturity < '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity < '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_1_30,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_30_60,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_60_90,
CASE
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is null THEN open_due
WHEN '%s' < date_maturity AND date_maturity <= '%s'
AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_90_120,
CASE
WHEN date_maturity <= '%s' AND currency_id is null
THEN open_due
WHEN date_maturity <= '%s' AND currency_id is not null
THEN open_due_currency
ELSE 0.0
END as b_over_120
FROM Q1
GROUP BY partner_id, currency_id, date_maturity, open_due,
open_due_currency, move_id, company_id
""" % (today, today, minus_30, today, minus_30, today, minus_60,
minus_30, minus_60, minus_30, minus_90, minus_60, minus_90,
minus_60, minus_120, minus_90, minus_120, minus_90, minus_120,
minus_120)
def _show_buckets_sql_q3(self, company_id):
return """
SELECT Q2.partner_id, current, b_1_30, b_30_60, b_60_90, b_90_120,
b_over_120,
COALESCE(Q2.currency_id, c.currency_id) AS currency_id
FROM Q2
JOIN res_company c ON (c.id = Q2.company_id)
WHERE c.id = %s
""" % company_id
def _show_buckets_sql_q3_payable(self, company_id):
return """
SELECT Q2.partner_id, current, b_1_30, b_30_60, b_60_90, b_90_120,
b_over_120,
COALESCE(Q2.currency_id, c.currency_id) AS currency_id
FROM Q2
JOIN res_company c ON (c.id = Q2.company_id)
WHERE c.id = %s
""" % company_id
def _show_buckets_sql_q3_receivable_and_payable(self, company_id):
return """
SELECT Q2.partner_id, current, b_1_30, b_30_60, b_60_90, b_90_120,
b_over_120,
COALESCE(Q2.currency_id, c.currency_id) AS currency_id
FROM Q2
JOIN res_company c ON (c.id = Q2.company_id)
WHERE c.id = %s
""" % company_id
def _show_buckets_sql_q4(self):
return """
SELECT partner_id, currency_id, sum(current) as current,
sum(b_1_30) as b_1_30,
sum(b_30_60) as b_30_60,
sum(b_60_90) as b_60_90,
sum(b_90_120) as b_90_120,
sum(b_over_120) as b_over_120
FROM Q3
GROUP BY partner_id, currency_id
"""
def _show_buckets_sql_q4_payable(self):
return """
SELECT partner_id, currency_id, sum(current) as current,
sum(b_1_30) as b_1_30,
sum(b_30_60) as b_30_60,
sum(b_60_90) as b_60_90,
sum(b_90_120) as b_90_120,
sum(b_over_120) as b_over_120
FROM Q3
GROUP BY partner_id, currency_id
"""
def _show_buckets_sql_q4_receivable_and_payable(self):
return """
SELECT partner_id, currency_id, sum(current) as current,
sum(b_1_30) as b_1_30,
sum(b_30_60) as b_30_60,
sum(b_60_90) as b_60_90,
sum(b_90_120) as b_90_120,
sum(b_over_120) as b_over_120
FROM Q3
GROUP BY partner_id, currency_id
"""
_bucket_dates = {
'today': fields.date.today(),
'minus_30': fields.date.today() - timedelta(days=30),
'minus_60': fields.date.today() - timedelta(days=60),
'minus_90': fields.date.today() - timedelta(days=90),
'minus_120': fields.date.today() - timedelta(days=120),
}
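# Caveat: these bucket boundaries are evaluated once, when the class is first
# imported, so a long-running server process will keep using stale dates.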
def _get_account_show_buckets(self, company_id, partner_ids, date_end):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_end = datetime.strptime(
date_end, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s),
Q3 AS (%s), Q4 AS (%s)
SELECT partner_id, currency_id, current, b_1_30, b_30_60, b_60_90,
b_90_120, b_over_120,
current+b_1_30+b_30_60+b_60_90+b_90_120+b_over_120
AS balance
FROM Q4
GROUP BY partner_id, currency_id, current, b_1_30, b_30_60, b_60_90,
b_90_120, b_over_120""" % (
self._show_buckets_sql_q1(partners, date_end),
self._show_buckets_sql_q2(
self._bucket_dates['today'],
self._bucket_dates['minus_30'],
self._bucket_dates['minus_60'],
self._bucket_dates['minus_90'],
self._bucket_dates['minus_120']),
self._show_buckets_sql_q3(company_id),
self._show_buckets_sql_q4()))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _get_account_show_buckets_payable(self, company_id, partner_ids, date_end):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_end = datetime.strptime(
date_end, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s),
Q3 AS (%s), Q4 AS (%s)
SELECT partner_id, currency_id, current, b_1_30, b_30_60, b_60_90,
b_90_120, b_over_120,
current+b_1_30+b_30_60+b_60_90+b_90_120+b_over_120
AS balance
FROM Q4
GROUP BY partner_id, currency_id, current, b_1_30, b_30_60, b_60_90,
b_90_120, b_over_120""" % (
self._show_buckets_sql_q1_payable(partners, date_end),
self._show_buckets_sql_q2_payable(
self._bucket_dates['today'],
self._bucket_dates['minus_30'],
self._bucket_dates['minus_60'],
self._bucket_dates['minus_90'],
self._bucket_dates['minus_120']),
self._show_buckets_sql_q3_payable(company_id),
self._show_buckets_sql_q4_payable()))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
def _get_account_show_buckets_receivable_and_payable(self, company_id, partner_ids, date_end):
res = dict(map(lambda x: (x, []), partner_ids))
partners = ', '.join([str(i) for i in partner_ids])
date_end = datetime.strptime(
date_end, DEFAULT_SERVER_DATE_FORMAT).date()
self.env.cr.execute("""WITH Q1 AS (%s), Q2 AS (%s),
Q3 AS (%s), Q4 AS (%s)
SELECT partner_id, currency_id, current, b_1_30, b_30_60, b_60_90,
b_90_120, b_over_120,
current+b_1_30+b_30_60+b_60_90+b_90_120+b_over_120
AS balance
FROM Q4
GROUP BY partner_id, currency_id, current, b_1_30, b_30_60, b_60_90,
b_90_120, b_over_120""" % (
self._show_buckets_sql_q1_receivable_and_payable(partners, date_end),
self._show_buckets_sql_q2_receivable_and_payable(
self._bucket_dates['today'],
self._bucket_dates['minus_30'],
self._bucket_dates['minus_60'],
self._bucket_dates['minus_90'],
self._bucket_dates['minus_120']),
self._show_buckets_sql_q3_receivable_and_payable(company_id),
self._show_buckets_sql_q4_receivable_and_payable()))
for row in self.env.cr.dictfetchall():
res[row.pop('partner_id')].append(row)
return res
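# render_html dispatches on data['report_type'] ('receivable', 'payable' or
# 'receivable_and_payable'); the three branches are structurally identical
# apart from which family of SQL helpers they call. Any other value would
# leave the display dictionaries undefined when docargs is built.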
@api.multi
def render_html(self, docids, data=None):
model = self.env.context.get('active_model')
docs = self.env[model].browse(self.env.context.get('active_id'))
company_id = data['company_id']
partner_ids = data['partner_ids']
date_start = data['date_start']
date_end = data['date_end']
today = fields.Date.today()
if data['report_type'] == 'receivable':
balance_start_to_display, buckets_to_display = {}, {}
lines_to_display, amount_due = {}, {}
currency_to_display = {}
today_display, date_start_display, date_end_display = {}, {}, {}
balance_start = self._get_account_initial_balance(
company_id, partner_ids, date_start)
for partner_id in partner_ids:
balance_start_to_display[partner_id] = {}
for line in balance_start[partner_id]:
currency = self.env['res.currency'].browse(line['currency_id'])
if currency not in balance_start_to_display[partner_id]:
balance_start_to_display[partner_id][currency] = []
balance_start_to_display[partner_id][currency] = \
line['balance']
lines = self._get_account_display_lines(
company_id, partner_ids, date_start, date_end)
for partner_id in partner_ids:
lines_to_display[partner_id], amount_due[partner_id] = {}, {}
currency_to_display[partner_id] = {}
today_display[partner_id] = self._format_date_to_partner_lang(
today, partner_id)
date_start_display[partner_id] = self._format_date_to_partner_lang(
date_start, partner_id)
date_end_display[partner_id] = self._format_date_to_partner_lang(
date_end, partner_id)
for line in lines[partner_id]:
currency = self.env['res.currency'].browse(line['currency_id'])
if currency not in lines_to_display[partner_id]:
lines_to_display[partner_id][currency] = []
currency_to_display[partner_id][currency] = currency
if currency in balance_start_to_display[partner_id]:
amount_due[partner_id][currency] = \
balance_start_to_display[partner_id][currency]
else:
amount_due[partner_id][currency] = 0.0
if not line['blocked']:
amount_due[partner_id][currency] += line['amount']
line['balance'] = amount_due[partner_id][currency]
line['date'] = self._format_date_to_partner_lang(
line['date'], partner_id)
line['date_maturity'] = self._format_date_to_partner_lang(
line['date_maturity'], partner_id)
lines_to_display[partner_id][currency].append(line)
if data['show_aging_buckets']:
buckets = self._get_account_show_buckets(
company_id, partner_ids, date_end)
for partner_id in partner_ids:
buckets_to_display[partner_id] = {}
for line in buckets[partner_id]:
currency = self.env['res.currency'].browse(
line['currency_id'])
if currency not in buckets_to_display[partner_id]:
buckets_to_display[partner_id][currency] = []
buckets_to_display[partner_id][currency] = line
if data['report_type'] == 'payable':
balance_start_to_display, buckets_to_display = {}, {}
lines_to_display, amount_due = {}, {}
currency_to_display = {}
today_display, date_start_display, date_end_display = {}, {}, {}
balance_start = self._get_account_initial_balance_payable(
company_id, partner_ids, date_start)
for partner_id in partner_ids:
balance_start_to_display[partner_id] = {}
for line in balance_start[partner_id]:
currency = self.env['res.currency'].browse(line['currency_id'])
if currency not in balance_start_to_display[partner_id]:
balance_start_to_display[partner_id][currency] = []
balance_start_to_display[partner_id][currency] = \
line['balance']
lines = self._get_account_display_lines_payable(
company_id, partner_ids, date_start, date_end)
for partner_id in partner_ids:
lines_to_display[partner_id], amount_due[partner_id] = {}, {}
currency_to_display[partner_id] = {}
today_display[partner_id] = self._format_date_to_partner_lang(
today, partner_id)
date_start_display[partner_id] = self._format_date_to_partner_lang(
date_start, partner_id)
date_end_display[partner_id] = self._format_date_to_partner_lang(
date_end, partner_id)
for line in lines[partner_id]:
currency = self.env['res.currency'].browse(line['currency_id'])
if currency not in lines_to_display[partner_id]:
lines_to_display[partner_id][currency] = []
currency_to_display[partner_id][currency] = currency
if currency in balance_start_to_display[partner_id]:
amount_due[partner_id][currency] = \
balance_start_to_display[partner_id][currency]
else:
amount_due[partner_id][currency] = 0.0
if not line['blocked']:
amount_due[partner_id][currency] += line['amount']
line['balance'] = amount_due[partner_id][currency]
line['date'] = self._format_date_to_partner_lang(
line['date'], partner_id)
line['date_maturity'] = self._format_date_to_partner_lang(
line['date_maturity'], partner_id)
lines_to_display[partner_id][currency].append(line)
if data['show_aging_buckets']:
buckets = self._get_account_show_buckets_payable(
company_id, partner_ids, date_end)
for partner_id in partner_ids:
buckets_to_display[partner_id] = {}
for line in buckets[partner_id]:
currency = self.env['res.currency'].browse(
line['currency_id'])
if currency not in buckets_to_display[partner_id]:
buckets_to_display[partner_id][currency] = []
buckets_to_display[partner_id][currency] = line
if data['report_type'] == 'receivable_and_payable':
balance_start_to_display, buckets_to_display = {}, {}
lines_to_display, amount_due = {}, {}
currency_to_display = {}
today_display, date_start_display, date_end_display = {}, {}, {}
balance_start = self._get_account_initial_balance_receivable_and_payable(
company_id, partner_ids, date_start)
for partner_id in partner_ids:
balance_start_to_display[partner_id] = {}
for line in balance_start[partner_id]:
currency = self.env['res.currency'].browse(line['currency_id'])
if currency not in balance_start_to_display[partner_id]:
balance_start_to_display[partner_id][currency] = []
balance_start_to_display[partner_id][currency] = \
line['balance']
lines = self._get_account_display_lines_receivable_and_payable(
company_id, partner_ids, date_start, date_end)
for partner_id in partner_ids:
lines_to_display[partner_id], amount_due[partner_id] = {}, {}
currency_to_display[partner_id] = {}
today_display[partner_id] = self._format_date_to_partner_lang(
today, partner_id)
date_start_display[partner_id] = self._format_date_to_partner_lang(
date_start, partner_id)
date_end_display[partner_id] = self._format_date_to_partner_lang(
date_end, partner_id)
for line in lines[partner_id]:
currency = self.env['res.currency'].browse(line['currency_id'])
if currency not in lines_to_display[partner_id]:
lines_to_display[partner_id][currency] = []
currency_to_display[partner_id][currency] = currency
if currency in balance_start_to_display[partner_id]:
amount_due[partner_id][currency] = \
balance_start_to_display[partner_id][currency]
else:
amount_due[partner_id][currency] = 0.0
if not line['blocked']:
amount_due[partner_id][currency] += line['amount']
line['balance'] = amount_due[partner_id][currency]
line['date'] = self._format_date_to_partner_lang(
line['date'], partner_id)
line['date_maturity'] = self._format_date_to_partner_lang(
line['date_maturity'], partner_id)
lines_to_display[partner_id][currency].append(line)
if data['show_aging_buckets']:
buckets = self._get_account_show_buckets_receivable_and_payable(
company_id, partner_ids, date_end)
for partner_id in partner_ids:
buckets_to_display[partner_id] = {}
for line in buckets[partner_id]:
currency = self.env['res.currency'].browse(
line['currency_id'])
if currency not in buckets_to_display[partner_id]:
buckets_to_display[partner_id][currency] = []
buckets_to_display[partner_id][currency] = line
docargs = {
'doc_ids': partner_ids,
'doc_model': 'res.partner',
'docs': self.env['res.partner'].browse(partner_ids),
'Amount_Due': amount_due,
'Balance_forward': balance_start_to_display,
'Lines': lines_to_display,
'Buckets': buckets_to_display,
'Currencies': currency_to_display,
'Show_Buckets': data['show_aging_buckets'],
'Filter_non_due_partners': data['filter_partners_non_due'],
'Date_start': date_start_display,
'Date_end': date_end_display,
'Date': today_display,
}
return self.env['report'].render(
'customer_vendor_statement.statement', values=docargs)
| 50.117771
| 100
| 0.545592
| 5,984
| 47,662
| 4.018382
| 0.028576
| 0.065874
| 0.048782
| 0.035931
| 0.952965
| 0.937744
| 0.93475
| 0.931382
| 0.921692
| 0.912002
| 0
| 0.027919
| 0.370253
| 47,662
| 950
| 101
| 50.170526
| 0.773213
| 0.004574
| 0
| 0.875138
| 0
| 0.00663
| 0.546305
| 0.026331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038674
| false
| 0
| 0.003315
| 0.026519
| 0.083978
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3810e0eeb7f7786b4f409fd221396b7804d6a94
| 138
|
py
|
Python
|
entitylinking/mlearning/models/__init__.py
|
debayan/starsem2018-entity-linking
|
6a25ebe5b0a488af400f4c37dadf9cb50aaca1a5
|
[
"Apache-2.0"
] | 56
|
2018-04-17T08:44:26.000Z
|
2022-03-28T00:47:45.000Z
|
entitylinking/mlearning/models/__init__.py
|
debayan/starsem2018-entity-linking
|
6a25ebe5b0a488af400f4c37dadf9cb50aaca1a5
|
[
"Apache-2.0"
] | 12
|
2019-01-26T08:37:16.000Z
|
2020-12-08T16:14:19.000Z
|
entitylinking/mlearning/models/__init__.py
|
debayan/starsem2018-entity-linking
|
6a25ebe5b0a488af400f4c37dadf9cb50aaca1a5
|
[
"Apache-2.0"
] | 16
|
2018-05-01T12:07:17.000Z
|
2021-02-06T09:01:45.000Z
|
from entitylinking.mlearning.models.feature_model import FeatureModel
from entitylinking.mlearning.models.vector_model import VectorModel
| 46
| 69
| 0.898551
| 16
| 138
| 7.625
| 0.625
| 0.278689
| 0.42623
| 0.52459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057971
| 138
| 2
| 70
| 69
| 0.938462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
a38e81f05874cc2664de2ca13c20682533256cf0
| 10,426
|
py
|
Python
|
download_and_create_reference_datasets/v01/hail_scripts/write_gnomad_coverage_vds.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | null | null | null |
download_and_create_reference_datasets/v01/hail_scripts/write_gnomad_coverage_vds.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | null | null | null |
download_and_create_reference_datasets/v01/hail_scripts/write_gnomad_coverage_vds.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | null | null | null |
import argparse
import hail
from hail.expr import TInt, TDouble, TString
import time
p = argparse.ArgumentParser()
p.add_argument("-b", "--output-bucket", help="Google Storage output bucket", default="seqr-reference-datasets")
args = p.parse_args()
hc = hail.HailContext(log="./hail_{}.log".format(time.strftime("%y%m%d_%H%M%S")))
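# Maps a dataset label to its per-chromosome coverage TSV shards and to the
# VDS path they are written to (the bucket comes from --output-bucket).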
COVERAGE_TSV_PATHS = {
"grch37_exomes": {
"input_paths": [
"gs://gnomad-browser/exomes/coverage/exacv2.chr1.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr10.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr11.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr12.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr13.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr14.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr15.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr16.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr17.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr18.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr19.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr2.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr20.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr21.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr22.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr3.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr4.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr5.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr6.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr7.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr8.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chr9.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chrY.cov.txt.gz",
"gs://gnomad-browser/exomes/coverage/exacv2.chrX.cov.txt.gz",
],
"output_path": "gs://%(output_bucket)s/GRCh37/gnomad/exomes.coverage.vds" % args.__dict__,
},
"grch37_genomes": {
"input_paths": [
"gs://gnomad-browser/genomes/coverage/Panel.chr1.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr10.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr11.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr12.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr13.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr14.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr15.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr16.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr17.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr18.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr19.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr2.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr20.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr21.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr22.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr3.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr4.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr5.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr6.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr7.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr8.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chr9.genome.coverage.txt.gz",
"gs://gnomad-browser/genomes/coverage/Panel.chrX.genome.coverage.txt.gz",
],
"output_path": "gs://%(output_bucket)s/GRCh37/gnomad/genomes.coverage.vds" % args.__dict__,
},
"grch38_exomes": {
"input_paths": [
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr1.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr10.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr11.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr12.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr13.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr14.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr15.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr16.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr17.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr18.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr19.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr2.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr20.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr21.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr22.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr3.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr4.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr5.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr6.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr7.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr8.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chr9.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chrX.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/exacv2.chrY.cov.liftover.GRCh38.txt.gz",
],
"output_path": "gs://%(output_bucket)s/GRCh38/gnomad/exomes.coverage.vds" % args.__dict__,
},
"grch38_genomes": {
"input_paths": [
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr1.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr10.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr11.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr12.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr13.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr14.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr15.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr16.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr17.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr18.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr19.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr2.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr20.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr21.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr22.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr3.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr4.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr5.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr6.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr7.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr8.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chr9.cov.liftover.GRCh38.txt.gz",
"gs://seqr-reference-data/GRCh38/gnomad/coverage/gnomad.chrX.cov.liftover.GRCh38.txt.gz",
],
"output_path": "gs://%(output_bucket)s/GRCh38/gnomad/genomes.coverage.vds" % args.__dict__,
},
}
field_types = {
'#chrom': TString(),
'pos': TInt(),
'mean': TDouble(),
'median': TDouble(),
'1': TDouble(),
'5': TDouble(),
'10': TDouble(),
'15': TDouble(),
'20': TDouble(),
'25': TDouble(),
'30': TDouble(),
'50': TDouble(),
'100': TDouble(),
}
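# The numeric column names ('1', '5', ..., '100') are renamed to 'x1'..'x100',
# presumably so they can be used as identifiers downstream; each column likely
# holds the fraction of samples covered at >= that depth (an assumption, not
# verified against the source TSVs).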
for label, data_paths in COVERAGE_TSV_PATHS.items():
kt = hc.import_table(data_paths["input_paths"], types=field_types).rename({
'#chrom': 'chrom',
'1': 'x1',
'5': 'x5',
'10': 'x10',
'15': 'x15',
'20': 'x20',
'25': 'x25',
'30': 'x30',
'50': 'x50',
'100': 'x100',
})
output_path = data_paths["output_path"]
print("\n\n==> writing out {}".format(output_path))
kt.write(output_path, overwrite=True)
| 62.059524
| 111
| 0.659026
| 1,361
| 10,426
| 5.011021
| 0.087436
| 0.068915
| 0.092375
| 0.130938
| 0.867742
| 0.86305
| 0.836657
| 0.836657
| 0.834897
| 0.675513
| 0
| 0.050507
| 0.158738
| 10,426
| 167
| 112
| 62.431138
| 0.727055
| 0
| 0
| 0.051613
| 0
| 0.451613
| 0.737842
| 0.704556
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032258
| 0
| 0.032258
| 0.006452
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6e79c833fa4c5a7a5600376bd5654c036e7e525b
| 14,649
|
py
|
Python
|
idm/trainers.py
|
xmy0916/IDM
|
ab29fbd6d3d8c4650f3dbe41a7d21f745d6167ee
|
[
"MIT"
] | 68
|
2021-07-25T11:56:30.000Z
|
2022-03-29T07:33:02.000Z
|
idm/trainers.py
|
xmy0916/IDM
|
ab29fbd6d3d8c4650f3dbe41a7d21f745d6167ee
|
[
"MIT"
] | 11
|
2021-08-08T09:33:17.000Z
|
2022-01-17T06:29:05.000Z
|
idm/trainers.py
|
xmy0916/IDM
|
ab29fbd6d3d8c4650f3dbe41a7d21f745d6167ee
|
[
"MIT"
] | 10
|
2021-08-13T02:39:55.000Z
|
2022-03-22T07:55:13.000Z
|
from __future__ import print_function, absolute_import
import time
import torch
from .utils.meters import AverageMeter
from .evaluation_metrics import accuracy
from .loss import TripletLoss, CrossEntropyLabelSmooth, TripletLossXBM, DivLoss, BridgeFeatLoss, BridgeProbLoss
class Baseline_Trainer(object):
def __init__(self, model, xbm, num_classes, margin=None):
super(Baseline_Trainer, self).__init__()
self.model = model
self.xbm = xbm
self.num_classes = num_classes
self.criterion_ce = CrossEntropyLabelSmooth(num_classes).cuda()
self.criterion_tri = TripletLoss(margin=margin).cuda()
self.criterion_tri_xbm = TripletLossXBM(margin=margin)
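# xbm appears to be a cross-batch memory: features and targets from past
# batches are enqueued and re-used as extra positives/negatives for the XBM
# triplet loss (see the use_xbm branch in train()).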
def train(self, epoch, data_loader_source, data_loader_target, source_classes, target_classes,
optimizer, print_freq=50, train_iters=400, use_xbm=False):
self.criterion_ce = CrossEntropyLabelSmooth(source_classes + target_classes).cuda()
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_ce = AverageMeter()
losses_tri = AverageMeter()
losses_xbm = AverageMeter()
precisions_s = AverageMeter()
precisions_t = AverageMeter()
end = time.time()
for i in range(train_iters):
# load data
source_inputs = data_loader_source.next()
target_inputs = data_loader_target.next()
data_time.update(time.time() - end)
# process inputs
s_inputs, s_targets, _ = self._parse_data(source_inputs)
t_inputs, t_targets, t_indexes = self._parse_data(target_inputs)
# arrange batch for domain-specific BN
device_num = torch.cuda.device_count()
B, C, H, W = s_inputs.size()
def reshape(inputs):
return inputs.view(device_num, -1, C, H, W)
s_inputs, t_inputs = reshape(s_inputs), reshape(t_inputs)
inputs = torch.cat((s_inputs, t_inputs), 1).view(-1, C, H, W)
targets = torch.cat((s_targets.view(device_num, -1), t_targets.view(device_num, -1)), 1)
targets = targets.view(-1)
# forward
prob, feats = self._forward(inputs)
prob = prob[:, 0:source_classes + target_classes]
# split feats
ori_feats = feats.view(device_num, -1, feats.size(-1))
feats_s, feats_t = ori_feats.split(ori_feats.size(1) // 2, dim=1)
ori_feats = torch.cat((feats_s, feats_t), 1).view(-1, ori_feats.size(-1))
# classification+triplet
loss_ce = self.criterion_ce(prob, targets)
loss_tri = self.criterion_tri(ori_feats, targets)
# enqueue and dequeue for xbm
if use_xbm:
self.xbm.enqueue_dequeue(ori_feats.detach(), targets.detach())
xbm_feats, xbm_targets = self.xbm.get()
loss_xbm = self.criterion_tri_xbm(ori_feats, targets, xbm_feats, xbm_targets)
losses_xbm.update(loss_xbm.item())
loss = loss_ce + loss_tri + loss_xbm
else:
loss = loss_ce + loss_tri
optimizer.zero_grad()
loss.backward()
optimizer.step()
ori_prob = prob.view(device_num, -1, prob.size(-1))
prob_s, prob_t = ori_prob.split(ori_prob.size(1) // 2, dim=1)
prob_s, prob_t = prob_s.contiguous(), prob_t.contiguous()
prec_s, = accuracy(prob_s.view(-1, prob_s.size(-1)).data, s_targets.data)
prec_t, = accuracy(prob_t.view(-1, prob_t.size(-1)).data, t_targets.data)
losses.update(loss.item())
losses_ce.update(loss_ce.item())
losses_tri.update(loss_tri.item())
precisions_s.update(prec_s[0])
precisions_t.update(prec_t[0])
# print log
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 0:
if use_xbm:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f}) '
'Data {:.3f} ({:.3f}) '
'Loss {:.3f} ({:.3f}) '
'Loss_ce {:.3f} ({:.3f}) '
'Loss_tri {:.3f} ({:.3f}) '
'Loss_xbm {:.3f} ({:.3f}) '
'Prec_s {:.2%} ({:.2%}) '
'Prec_t {:.2%} ({:.2%}) '
.format(epoch, i + 1, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses.val, losses.avg,
losses_ce.val, losses_ce.avg,
losses_tri.val, losses_tri.avg,
losses_xbm.val, losses_xbm.avg,
precisions_s.val, precisions_s.avg,
precisions_t.val, precisions_t.avg
))
else:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f}) '
'Data {:.3f} ({:.3f}) '
'Loss {:.3f} ({:.3f}) '
'Loss_ce {:.3f} ({:.3f}) '
'Loss_tri {:.3f} ({:.3f}) '
'Prec_s {:.2%} ({:.2%}) '
'Prec_t {:.2%} ({:.2%}) '
.format(epoch, i + 1, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses.val, losses.avg,
losses_ce.val, losses_ce.avg,
losses_tri.val, losses_tri.avg,
precisions_s.val, precisions_s.avg,
precisions_t.val, precisions_t.avg
))
def _parse_data(self, inputs):
imgs, _, pids, _, indexes = inputs
return imgs.cuda(), pids.cuda(), indexes.cuda()
def _forward(self, inputs):
return self.model(inputs)
class IDM_Trainer(object):
def __init__(self, model, xbm, num_classes, margin=None, mu1=1.0, mu2=1.0, mu3=1.0):
super(IDM_Trainer, self).__init__()
self.model = model
self.xbm = xbm
self.mu1 = mu1
self.mu2 = mu2
self.mu3 = mu3
self.num_classes = num_classes
self.criterion_ce = BridgeProbLoss(num_classes).cuda()
self.criterion_tri = TripletLoss(margin=margin).cuda()
self.criterion_tri_xbm = TripletLossXBM(margin=margin)
self.criterion_bridge_feat = BridgeFeatLoss()
self.criterion_diverse = DivLoss()
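# Total loss in train() combines (1 - mu1) * cross-entropy, the triplet loss
# (plus the XBM triplet term when use_xbm is set), mu1 * bridge probability
# loss, mu2 * bridge feature loss and mu3 * diversity loss, matching the
# weighting applied below.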
def train(self, epoch, data_loader_source, data_loader_target, source_classes, target_classes,
optimizer, print_freq=50, train_iters=400, use_xbm=False, stage=0):
self.criterion_ce = BridgeProbLoss(source_classes + target_classes).cuda()
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_ce = AverageMeter()
losses_tri = AverageMeter()
losses_xbm = AverageMeter()
losses_bridge_prob = AverageMeter()
losses_bridge_feat = AverageMeter()
losses_diverse = AverageMeter()
precisions_s = AverageMeter()
precisions_t = AverageMeter()
end = time.time()
for i in range(train_iters):
# load data
source_inputs = data_loader_source.next()
target_inputs = data_loader_target.next()
data_time.update(time.time() - end)
# process inputs
s_inputs, s_targets, _ = self._parse_data(source_inputs)
t_inputs, t_targets, t_indexes = self._parse_data(target_inputs)
# arrange batch for domain-specific BN
device_num = torch.cuda.device_count()
B, C, H, W = s_inputs.size()
def reshape(inputs):
return inputs.view(device_num, -1, C, H, W)
s_inputs, t_inputs = reshape(s_inputs), reshape(t_inputs)
inputs = torch.cat((s_inputs, t_inputs), 1).view(-1, C, H, W)
targets = torch.cat((s_targets.view(device_num, -1), t_targets.view(device_num, -1)), 1)
targets = targets.view(-1)
# forward
prob, feats, attention_lam = self._forward(inputs, stage)  # attention_lam: [B, 2]
prob = prob[:, 0:source_classes + target_classes]
# split feats
ori_feats = feats.view(device_num, -1, feats.size(-1))
feats_s, feats_t, feats_mixed = ori_feats.split(ori_feats.size(1) // 3, dim=1)
ori_feats = torch.cat((feats_s, feats_t), 1).view(-1, ori_feats.size(-1))
# classification+triplet
loss_ce, loss_bridge_prob = self.criterion_ce(prob, targets, attention_lam[:, 0].detach())
loss_tri = self.criterion_tri(ori_feats, targets)
loss_diverse = self.criterion_diverse(attention_lam)
feats_s = feats_s.contiguous().view(-1, feats.size(-1))
feats_t = feats_t.contiguous().view(-1, feats.size(-1))
feats_mixed = feats_mixed.contiguous().view(-1, feats.size(-1))
loss_bridge_feat = self.criterion_bridge_feat(feats_s, feats_t, feats_mixed, attention_lam)
# enqueue and dequeue for xbm
if use_xbm:
self.xbm.enqueue_dequeue(ori_feats.detach(), targets.detach())
xbm_feats, xbm_targets = self.xbm.get()
loss_xbm = self.criterion_tri_xbm(ori_feats, targets, xbm_feats, xbm_targets)
losses_xbm.update(loss_xbm.item())
loss = (1.-self.mu1) * loss_ce + loss_tri + loss_xbm + \
self.mu1 * loss_bridge_prob + self.mu2 * loss_bridge_feat + self.mu3 * loss_diverse
else:
loss = (1.-self.mu1) * loss_ce + loss_tri + \
self.mu1 * loss_bridge_prob + self.mu2 * loss_bridge_feat + self.mu3 * loss_diverse
optimizer.zero_grad()
loss.backward()
optimizer.step()
ori_prob = prob.view(device_num, -1, prob.size(-1))
prob_s, prob_t, _ = ori_prob.split(ori_prob.size(1) // 3, dim=1)
prob_s, prob_t = prob_s.contiguous(), prob_t.contiguous()
prec_s, = accuracy(prob_s.view(-1, prob_s.size(-1)).data, s_targets.data)
            prec_t, = accuracy(prob_t.view(-1, prob_t.size(-1)).data, t_targets.data)
losses.update(loss.item())
losses_ce.update(loss_ce.item())
losses_tri.update(loss_tri.item())
losses_bridge_prob.update(loss_bridge_prob.item())
losses_bridge_feat.update(loss_bridge_feat.item())
losses_diverse.update(loss_diverse.item())
precisions_s.update(prec_s[0])
precisions_t.update(prec_t[0])
# print log
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 0:
if use_xbm:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f}) '
'Data {:.3f} ({:.3f}) '
'Loss {:.3f} ({:.3f}) '
'Loss_ce {:.3f} ({:.3f}) '
'Loss_tri {:.3f} ({:.3f}) '
'Loss_xbm {:.3f} ({:.3f}) '
'Loss_bridge_prob {:.3f} ({:.3f}) '
'Loss_bridge_feat {:.3f} ({:.3f}) '
'Loss_diverse {:.3f} ({:.3f}) '
'Prec_s {:.2%} ({:.2%}) '
'Prec_t {:.2%} ({:.2%}) '
.format(epoch, i + 1, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses.val, losses.avg,
losses_ce.val, losses_ce.avg,
losses_tri.val, losses_tri.avg,
losses_xbm.val, losses_xbm.avg,
losses_bridge_prob.val, losses_bridge_prob.avg,
losses_bridge_feat.val, losses_bridge_feat.avg,
losses_diverse.val, losses_diverse.avg,
precisions_s.val, precisions_s.avg,
precisions_t.val, precisions_t.avg
))
else:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f}) '
'Data {:.3f} ({:.3f}) '
'Loss {:.3f} ({:.3f}) '
'Loss_ce {:.3f} ({:.3f}) '
'Loss_tri {:.3f} ({:.3f}) '
'Loss_bridge_prob {:.3f} ({:.3f}) '
'Loss_bridge_feat {:.3f} ({:.3f}) '
'Loss_diverse {:.3f} ({:.3f}) '
'Prec_s {:.2%} ({:.2%}) '
'Prec_t {:.2%} ({:.2%}) '
.format(epoch, i + 1, len(data_loader_target),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses.val, losses.avg,
losses_ce.val, losses_ce.avg,
losses_tri.val, losses_tri.avg,
losses_bridge_prob.val, losses_bridge_prob.avg,
losses_bridge_feat.val, losses_bridge_feat.avg,
losses_diverse.val, losses_diverse.avg,
precisions_s.val, precisions_s.avg,
precisions_t.val, precisions_t.avg
))
def _parse_data(self, inputs):
imgs, _, pids, _, indexes = inputs
return imgs.cuda(), pids.cuda(), indexes.cuda()
def _forward(self, inputs, stage):
return self.model(inputs, stage=stage)
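# --- Added illustration (not part of the original file) ---
# A minimal, runnable sketch of the batch arrangement used in train() above:
# the source and target mini-batches are split into per-device chunks and
# interleaved, so that under DataParallel each GPU receives a contiguous
# [source | target] block and domain-specific BN statistics stay separated.
# All shapes below are toy values chosen for the demo.
def _demo_domain_batch_arrangement():
    device_num = 2                      # pretend two GPUs
    B, C, H, W = 8, 3, 4, 4             # toy per-domain batch
    s = torch.zeros(B, C, H, W)         # stand-in "source" images
    t = torch.ones(B, C, H, W)          # stand-in "target" images
    s = s.view(device_num, -1, C, H, W)
    t = t.view(device_num, -1, C, H, W)
    mixed = torch.cat((s, t), 1).view(-1, C, H, W)
    # each device chunk of 8 images is [4 source (zeros), 4 target (ones)]
    return mixed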
| 45.073846
| 111
| 0.503379
| 1,603
| 14,649
| 4.318777
| 0.081098
| 0.016178
| 0.023111
| 0.020222
| 0.862198
| 0.851654
| 0.83952
| 0.823342
| 0.795031
| 0.795031
| 0
| 0.019481
| 0.376271
| 14,649
| 324
| 112
| 45.212963
| 0.738207
| 0.020957
| 0
| 0.804688
| 0
| 0
| 0.066462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039063
| false
| 0
| 0.023438
| 0.015625
| 0.09375
| 0.035156
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6ea7d1960c847c1c0aa183f58e5d8241c710d5d8
| 14,034
|
py
|
Python
|
test/firmware_test/firmware_test.py
|
Createcafe3d/YXE-firmware-flash
|
4c03fceeedafd9f3c801111d8ee9a8d614e53ae0
|
[
"MIT"
] | 1
|
2017-03-08T02:47:17.000Z
|
2017-03-08T02:47:17.000Z
|
test/firmware_test/firmware_test.py
|
Createcafe3d/YXE-firmware-flash
|
4c03fceeedafd9f3c801111d8ee9a8d614e53ae0
|
[
"MIT"
] | null | null | null |
test/firmware_test/firmware_test.py
|
Createcafe3d/YXE-firmware-flash
|
4c03fceeedafd9f3c801111d8ee9a8d614e53ae0
|
[
"MIT"
] | 8
|
2016-05-11T11:38:59.000Z
|
2020-02-15T09:55:39.000Z
|
import sys
import os
from mock import patch, MagicMock
import unittest
from subprocess import PIPE
# sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
import firmware
from firmware.firmware import FirmwareUpdater, MacFirmwareUpdater, LinuxFirmwareUpdater, WindowsFirmwareUpdater
@patch('firmware.sys')
class TestFirmwareInit(unittest.TestCase):
def test_correct_class_for_mac_platform_is_provided(self, mock_sys):
mock_sys.platform = 'darwin'
result = firmware.get_firmware_updater()
        self.assertEqual(MacFirmwareUpdater, type(result))
def test_correct_class_for_win32_platform_is_provided(self, mock_sys):
mock_sys.platform = 'win32'
result = firmware.get_firmware_updater()
        self.assertEqual(WindowsFirmwareUpdater, type(result))
def test_correct_class_for_win64_platform_is_provided(self, mock_sys):
mock_sys.platform = 'winamd64'
result = firmware.get_firmware_updater()
        self.assertEqual(WindowsFirmwareUpdater, type(result))
def test_correct_class_for_linux_platform_is_provided(self, mock_sys):
mock_sys.platform = 'linux'
result = firmware.get_firmware_updater()
        self.assertEqual(LinuxFirmwareUpdater, type(result))
def test_exception_raised_if_not_supported(self, mock_sys):
mock_sys.platform = 'sun'
with self.assertRaises(Exception):
firmware.get_firmware_updater()
@patch('firmware.firmware.Popen')
@patch('firmware.os.path.isfile')
@patch('firmware.os.stat')
@patch('firmware.os.chmod')
class TestLinuxFirmwareUpdater(unittest.TestCase):
BOOTLOADER_IDVENDOR = 0x0483
BOOTLOADER_IDPRODUCT = 0xdf11
PEACHY_IDVENDOR = 0x16d0
PEACHY_IDPRODUCT = 0x0af3
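    # (Added note) 0x0483:0xdf11 is the vendor:product pair STM32 chips report
    # while in DFU bootloader mode; the 0x16d0:0x0af3 pair is the Peachy
    # printer's normal runtime ID, per the constant names above. The tests
    # below format these into lsusb-style strings.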
def setUp(self):
self.bin_path = os.path.join('some','binary', 'path')
self.firmware_path = os.path.join('some', 'firmware', 'path.bin')
def test_update_should_return_true_if_update_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_isfile.return_value = True
mock_Popen.return_value.communicate.return_value = ('err', 'out')
mock_Popen.return_value.wait.return_value = 0
        usb_address = '{}:{}'.format('0483', 'df11')
        expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_address]
l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = l_fw_up.update(self.firmware_path)
self.assertTrue(result)
mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
mock_Popen.return_value.wait.assert_called_with()
def test_update_should_return_false_if_update_not_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_isfile.return_value = True
mock_Popen.return_value.communicate.return_value = ('err', 'out')
mock_Popen.return_value.wait.return_value = 34
        usb_address = '{}:{}'.format('0483', 'df11')
        expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_address]
l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = l_fw_up.update(self.firmware_path)
self.assertFalse(result)
mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
mock_Popen.return_value.wait.assert_called_with()
def test_check_ready_should_return_true_if_1_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('{:04x}:{:04x}'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = fw_up.check_ready()
self.assertTrue(result)
mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)
def test_check_ready_should_return_False_if_no_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('', '')
mock_Popen.return_value.wait.return_value = 0
fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = fw_up.check_ready()
self.assertFalse(result)
mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)
def test_check_ready_should_return_False_if_only_peachy_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('{:04x}:{:04x}'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = fw_up.check_ready()
self.assertFalse(result)
mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)
def test_check_ready_should_raise_exception_if_peachy_and_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('{:04x}:{:04x}\n{:04x}:{:04x}'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
with self.assertRaises(Exception):
fw_up.check_ready()
mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)
    def test_check_ready_should_raise_exception_if_multiple_peachys(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('{0:04x}:{1:04x}\n{0:04x}:{1:04x}'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
with self.assertRaises(Exception):
fw_up.check_ready()
mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)
    def test_check_ready_should_raise_exception_if_multiple_bootloaders(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('{0:04x}:{1:04x}\n{0:04x}:{1:04x}'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = LinuxFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
with self.assertRaises(Exception):
fw_up.check_ready()
mock_Popen.assert_called_with(['lsusb'], stdout=PIPE, stderr=PIPE)
@patch('firmware.firmware.Popen')
@patch('firmware.os.path.isfile')
@patch('firmware.os.stat')
@patch('firmware.os.chmod')
class TestWindowsFirmwareUpdater(unittest.TestCase):
BOOTLOADER_IDVENDOR = 0x0483
BOOTLOADER_IDPRODUCT = 0xdf11
PEACHY_IDVENDOR = 0x16d0
PEACHY_IDPRODUCT = 0x0af3
def setUp(self):
self.bin_path = os.path.join('some','binary', 'path')
self.firmware_path = os.path.join('some', 'firmware', 'path.bin')
# def test_update_should_return_true_if_update_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
# mock_isfile.return_value = True
# mock_Popen.return_value.communicate.return_value = ('err', 'out')
# mock_Popen.return_value.wait.return_value = 0
    # usb_address = '{}:{}'.format('0483', 'df11')
    # expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_address]
# l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
# result = l_fw_up.update(self.firmware_path)
# self.assertTrue(result)
# mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
# mock_Popen.return_value.wait.assert_called_with()
# def test_update_should_return_false_if_update_not_successfull(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
# mock_isfile.return_value = True
# mock_Popen.return_value.communicate.return_value = ('err', 'out')
# mock_Popen.return_value.wait.return_value = 34
    # usb_address = '{}:{}'.format('0483', 'df11')
    # expected_command = [os.path.join(self.bin_path, 'dfu-util'), '-a', '0', '--dfuse-address', '0x08000000', '-D', self.firmware_path, '-d', usb_address]
# l_fw_up = LinuxFirmwareUpdater(self.bin_path, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
# result = l_fw_up.update(self.firmware_path)
# self.assertFalse(result)
# mock_Popen.assert_called_with(expected_command, stdout=PIPE, stderr=PIPE)
# mock_Popen.return_value.wait.assert_called_with()
def test_check_ready_should_return_true_if_1_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('"USB\VID_{:04X}&PID_{:04X}"'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = fw_up.check_ready()
self.assertTrue(result)
mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)
def test_check_ready_should_return_False_if_no_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('', '')
mock_Popen.return_value.wait.return_value = 0
fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = fw_up.check_ready()
self.assertFalse(result)
mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)
def test_check_ready_should_return_False_if_only_peachy_results(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('"USB\VID_{:04X}&PID_{:04X}"'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
result = fw_up.check_ready()
self.assertFalse(result)
mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)
def test_check_ready_should_raise_exception_if_peachy_and_bootloader(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
mock_Popen.return_value.communicate.return_value = ('"USB\VID_{:04X}&PID_{:04X}"\n"USB\VID_{:04X}&PID_{:04X}"'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT, self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
with self.assertRaises(Exception):
fw_up.check_ready()
mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)
    def test_check_ready_should_raise_exception_if_multiple_peachys(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        mock_Popen.return_value.communicate.return_value = ('"USB\\VID_{0:04X}&PID_{1:04X}"\n"USB\\VID_{0:04X}&PID_{1:04X}"'.format(self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
with self.assertRaises(Exception):
fw_up.check_ready()
mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)
    def test_check_ready_should_raise_exception_if_multiple_bootloaders(self, mock_chmod, mock_stat, mock_isfile, mock_Popen):
        mock_Popen.return_value.communicate.return_value = ('"USB\\VID_{0:04X}&PID_{1:04X}"\n"USB\\VID_{0:04X}&PID_{1:04X}"'.format(self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT), '')
mock_Popen.return_value.wait.return_value = 0
fw_up = WindowsFirmwareUpdater('somepath', self.BOOTLOADER_IDVENDOR, self.BOOTLOADER_IDPRODUCT, self.PEACHY_IDVENDOR, self.PEACHY_IDPRODUCT)
with self.assertRaises(Exception):
fw_up.check_ready()
mock_Popen.assert_called_with('''wmic.exe path WIN32_PnPEntity where "DeviceID like 'USB\\\\VID_%'" get HardwareID''', stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
unittest.main()
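# --- Added note (not part of the original file) ---
# Given the sys.path setup at the top of this file, the suite can be run
# directly, e.g.:
#   python test/firmware_test/firmware_test.py
# or via the unittest runner from the repository root:
#   python -m unittest discover test/firmware_test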
| 55.251969
| 228
| 0.736568
| 1,808
| 14,034
| 5.361726
| 0.072456
| 0.0817
| 0.055705
| 0.074273
| 0.947184
| 0.944914
| 0.942232
| 0.929028
| 0.929028
| 0.910563
| 0
| 0.017972
| 0.14358
| 14,034
| 253
| 229
| 55.470356
| 0.788585
| 0.12313
| 0
| 0.753086
| 0
| 0.018519
| 0.110568
| 0.033708
| 0
| 0
| 0.005537
| 0
| 0.216049
| 1
| 0.12963
| false
| 0
| 0.04321
| 0
| 0.240741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
42e451a9820c75d1c5187a731488067714a5351c
| 204
|
py
|
Python
|
docassemble_demo/docassemble/demo/change_suffix.py
|
knod/docassemble
|
bd052b557743d098138a5f2129a9d3c2f68090a6
|
[
"MIT"
] | 568
|
2016-01-08T19:05:06.000Z
|
2022-03-30T19:44:47.000Z
|
docassemble_demo/docassemble/demo/change_suffix.py
|
knod/docassemble
|
bd052b557743d098138a5f2129a9d3c2f68090a6
|
[
"MIT"
] | 348
|
2016-01-25T02:17:36.000Z
|
2022-03-27T21:22:43.000Z
|
docassemble_demo/docassemble/demo/change_suffix.py
|
knod/docassemble
|
bd052b557743d098138a5f2129a9d3c2f68090a6
|
[
"MIT"
] | 262
|
2016-01-14T23:09:50.000Z
|
2022-03-23T15:06:08.000Z
|
import docassemble.base.functions
def my_name_suffix():
return ['Jr', 'Sr', 'II', 'III', 'IV', 'Esq', 'PhD']
docassemble.base.functions.update_language_function('en', 'name_suffix', my_name_suffix)
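# --- Added usage sketch (hypothetical; not part of the original module) ---
# Once this module has been imported, docassemble's 'name_suffix' word list
# for language 'en' resolves to the customized list above, e.g.:
#   import docassemble.base.functions
#   docassemble.base.functions.name_suffix()
#   # -> ['Jr', 'Sr', 'II', 'III', 'IV', 'Esq', 'PhD']
# Calling name_suffix() directly like this (rather than through interview
# machinery) is an assumption about docassemble's API surface.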
| 29.142857
| 88
| 0.710784
| 28
| 204
| 4.928571
| 0.714286
| 0.217391
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102941
| 204
| 6
| 89
| 34
| 0.754098
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
6e1ad5ffcca75c1163504cb93f3d7468a32f4921
| 16,506
|
py
|
Python
|
sinkhorn_barycenters.py
|
hichamjanati/debiased-ot-barycenters
|
632adefbc6c9e98ea237ac86feab921dc8d00778
|
[
"BSD-3-Clause"
] | 15
|
2020-06-06T02:56:58.000Z
|
2021-12-06T05:09:22.000Z
|
sinkhorn_barycenters.py
|
hichamjanati/debiased-ot-barycenters
|
632adefbc6c9e98ea237ac86feab921dc8d00778
|
[
"BSD-3-Clause"
] | 1
|
2021-09-10T12:04:48.000Z
|
2021-09-10T14:49:55.000Z
|
sinkhorn_barycenters.py
|
hichamjanati/debiased-ot-barycenters
|
632adefbc6c9e98ea237ac86feab921dc8d00778
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Debiased Sinkhorn barycenters.
"""
#
# License: MIT License
import torch
import numpy as np
import warnings
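# (Added note) The two einsum helpers below apply a separable Gibbs kernel K
# one axis at a time to a batch of 2D images / 3D volumes, so a kernel
# application costs O(n^3) per image (resp. O(n^4) per volume) instead of
# materializing the full n^2 x n^2 (resp. n^3 x n^3) kernel matrix.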
def convol_imgs(imgs, K):
kx = torch.einsum("...ij,kjl->kil", K, imgs)
kxy = torch.einsum("...ij,klj->kli", K, kx)
return kxy
def convol_3d(cloud, K):
kx = torch.einsum("ij,rjlk->rilk", K, cloud)
kxy = torch.einsum("ij,rkjl->rkil", K, kx)
kxyz = torch.einsum("ij,rlkj->rlki", K, kxy)
return kxyz
def barycenter_3d(P, K, Kb=None, c=None, maxiter=1000, tol=1e-7,
debiased=False, weights=None, return_log=False):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
n_hists, width, _, _ = P.shape
b = torch.ones_like(P, requires_grad=False)
q = torch.ones((width, width, width), device=P.device, dtype=P.dtype)
if Kb is None:
Kb = convol_3d(b, K)
if c is None:
c = q.clone()
log = {'err': [], 'a': [], 'b': [], 'q': []}
err = 10
if weights is None:
weights = torch.ones(n_hists, device=P.device, dtype=P.dtype) / n_hists
for ii in range(maxiter):
if torch.isnan(q).any():
break
qold = q.clone()
a = P / Kb
Ka = convol_3d(a, K.t())
q = c * torch.prod((Ka) ** weights[:, None, None, None], dim=0)
if debiased:
Kc = convol_3d(c[None, :], K).squeeze()
c = (c * q / Kc) ** 0.5
Q = q[None, :]
b = Q / Ka
Kb = convol_3d(b, K)
err = abs(q - qold).max()
if err < tol and ii > 10:
break
print("Barycenter 3d | err = ", err)
if return_log:
log["err"].append(err)
log["a"] = a
log["q"] = q
log["b"] = b
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
if return_log:
return q, log
return q
def barycenter_debiased_1d(P, K, maxiter=5000, tol=1e-5,
weights=None, return_log=False):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
bold = torch.ones_like(P, device=P.device)
b = bold.clone()
c, q = torch.ones((2, dim), dtype=P.dtype, device=P.device)
Kb = K.mm(b)
log = {'err': [], 'a': [], 'b': [], 'c': [], 'q': []}
err = 10
if weights is None:
weights = torch.ones(n_hists, dtype=P.dtype, device=P.device) / n_hists
for ii in range(maxiter):
qold = q.clone()
a = P / Kb
Ka = K.t().mm(a)
q = c * torch.prod((Ka) ** weights[None, :], dim=1)
c = (c * q / K.mv(c)) ** 0.5
Q = q[:, None]
b = Q / Ka
Kb = K.mm(b)
# err = abs(a * Kb - P).mean()
err = abs(q - qold).max()
if return_log:
log["err"].append(err)
log["a"].append(a)
log["q"].append(q)
log["c"].append(c)
log["b"].append(b)
if err < tol and ii > 10:
break
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
if return_log:
return q, log
return q
def ot_diag_2d(q, K, maxiter=100, tol=1e-5):
"""Computes Auto-correlation potential for 2d distributions."""
c = torch.ones_like(q)
for ii in range(maxiter):
Kc = K.t().mm(K.mm(c).t()).t()
c_new = (c * q / Kc) ** 0.5
err = abs(c - c_new).max()
err /= max(c.max(), c_new.max(), 1.)
c = c_new.clone()
if err < tol and ii > 3:
break
if ii == maxiter - 1:
warnings.warn("*** Auto-correlation potential "
"did not converge ! err = {} ***".format(err))
return c
def ot_diag_1d(c, q, K, maxiter=100, tol=1e-5):
"""Computes Auto-correlation potential for 2d distributions."""
c = torch.ones_like(q)
for ii in range(maxiter):
Kc = K.mv(c)
c_new = (c * q / Kc) ** 0.5
err = abs(c - c_new).max()
err /= max(c.max(), c_new.max(), 1.)
c = c_new.clone()
if err < tol and ii > 3:
break
if ii == maxiter - 1:
warnings.warn("*** Auto-correlation potential "
"did not converge ! err = {} ***".format(err))
return c
def ot_diag_2d_np(c, q, K, maxiter=100, tol=1e-5):
"""Computes Auto-correlation potential for 2d distributions."""
for ii in range(maxiter):
Kc = K.T.dot(K.dot(c).T).T
c_new = (c * q / Kc) ** 0.5
err = abs(c - c_new).max()
err /= max(c.max(), c_new.max(), 1.)
c = c_new.copy()
if err < tol and ii > 3:
break
if ii == maxiter - 1:
warnings.warn("*** Auto-correlation potential "
"did not converge ! err = {} ***".format(err))
return c
def ot_diag_1d_np(q, K, maxiter=100, tol=1e-5):
"""Computes Auto-correlation potential for 2d distributions."""
c = np.ones_like(q)
for ii in range(maxiter):
c_new = (c * q / K.dot(c)) ** 0.5
err = abs(c - c_new).max()
err /= max(c.max(), c_new.max(), 1.)
c = c_new.copy()
if err < tol and ii > 3:
break
if ii == maxiter - 1:
warnings.warn("*** Auto-correlation potential "
"did not converge ! err = {} ***".format(err))
return c
def barycenter_1d(P, K, maxiter=5000, tol=1e-5,
weights=None, return_log=False):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
b = torch.ones_like(P, device=P.device)
q = torch.ones(dim, dtype=P.dtype, device=P.device)
Kb = K.mm(b)
err = 1
log = {'err': [err], 'a': [], 'b': [], 'c': [], 'q': []}
if weights is None:
weights = torch.ones(n_hists, dtype=P.dtype, device=P.device) / n_hists
for ii in range(maxiter):
qold = q.clone()
a = P / Kb
Ka = K.t().mm(a)
q = torch.prod((b * Ka) ** weights[None, :], dim=1)
Q = q[:, None]
b = Q / Ka
Kb = K.mm(b)
# err = abs(a * Kb - P).mean()
err = abs(q - qold).max()
if err < tol and ii > 10:
break
if return_log:
log["err"].append(err)
log["a"].append(a)
log["q"].append(q)
log["b"].append(b)
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
if return_log:
return q, log
return q
def barycenter_debiased_2d(P, K, Kb=None, c=None, maxiter=5000, tol=1e-5,
weights=None, return_log=False):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
n_hists, width, _ = P.shape
b = torch.ones_like(P, requires_grad=False)
q = torch.ones((width, width), dtype=P.dtype, device=P.device)
if Kb is None:
Kb = convol_imgs(b, K)
if c is None:
c = q.clone()
log = {'err': [], 'a': [], 'b': [], 'q': []}
err = 10
if weights is None:
weights = torch.ones(n_hists, dtype=P.dtype, device=P.device) / n_hists
for ii in range(maxiter):
qold = q.clone()
a = P / Kb
Ka = convol_imgs(a, K.t())
q = c * torch.prod((Ka) ** weights[:, None, None], dim=0)
for kk in range(10):
Kc = K.t().mm(K.mm(c).t()).t()
c = (c * q / Kc) ** 0.5
Q = q[None, :, :]
b = Q / Ka
Kb = convol_imgs(b, K)
# err = abs(a * Kb - P).mean()
err = abs(q - qold).max()
if err < tol and ii > 10:
break
if return_log:
log["err"].append(err)
log["a"] = a
log["q"] = q
log["b"] = b
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
if return_log:
return q, log
return q
def barycenter_2d(P, K, Kb=None, maxiter=5000, tol=1e-5,
weights=None, return_log=False):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
n_hists, width, _ = P.shape
b = torch.ones_like(P, requires_grad=False)
q = torch.ones((width, width), dtype=P.dtype, device=P.device)
if Kb is None:
Kb = convol_imgs(b, K)
log = {'err': [], 'a': [], 'b': [], 'q': []}
err = 10
if weights is None:
weights = torch.ones(n_hists, dtype=P.dtype, device=P.device) / n_hists
for ii in range(maxiter):
qold = q.clone()
a = P / Kb
Ka = convol_imgs(a, K.t())
q = torch.prod((b * Ka) ** weights[:, None, None], dim=0)
Q = q[None, :, :]
b = Q / Ka
Kb = convol_imgs(b, K)
err = abs(q - qold).max()
if err < tol and ii > 10:
break
if return_log:
log["err"].append(err)
log["a"] = a
log["q"] = q
log["b"] = b
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
if return_log:
return q, log
return q
def barycenter(P, K, reference="debiased", **kwargs):
"""Compute OT barycenter."""
ndim = P.ndimension()
if ndim > 3 or ndim <= 1:
raise ValueError("Data dimension must be 2 for 1d distributions"
" or 3 for 2d distributions.")
if reference == "debiased":
if ndim == 2:
func = barycenter_debiased_1d
elif ndim == 3:
func = barycenter_debiased_2d
elif reference == "uniform":
if ndim == 2:
func = barycenter_1d
elif ndim == 3:
func = barycenter_2d
elif reference == "product":
if ndim == 2:
func = barycenter_ref_1d
        else:
            func = barycenter_ref_2d
    else:
        raise ValueError("Unknown reference '{}'.".format(reference))
    return func(P, K, **kwargs)
def barycenter_np_debiased_1d(P, K, maxiter=5000, tol=1e-5,
weights=None, return_log=True):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
bold = np.ones_like(P)
b = bold.copy()
q, c = np.ones((2, dim))
Kb = K.dot(b)
log = {'err': [], 'a': [], 'b': [], 'c': [], 'q': []}
err = 10
if weights is None:
weights = np.ones(n_hists) / n_hists
for ii in range(maxiter):
qold = q.copy()
a = P / Kb
Ka = K.T.dot(a)
q = c * np.prod(Ka ** weights[None, :], axis=1)
Q = q[:, None]
b = Q / Ka
Kb = K.dot(b)
c = (c * q / K.dot(c)) ** 0.5
err = abs(q - qold).max()
if return_log:
log["err"].append(err)
log["a"].append(a)
log["q"].append(q)
log["c"].append(c)
log["b"].append(b)
if err < tol and ii > 10:
break
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
if return_log:
return q, log
return q
def barycenter_np_1d(P, K, maxiter=5000, tol=1e-5,
weights=None, return_log=True):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
bold = np.ones_like(P)
b = bold.copy()
q = np.ones(dim)
Kb = K.dot(b)
log = {'err': [], 'a': [], 'b': [], 'c': [], 'q': []}
err = 10
if weights is None:
weights = np.ones(n_hists) / n_hists
for ii in range(maxiter):
qold = q.copy()
a = P / Kb
Ka = K.T.dot(a)
q = np.prod((b * Ka) ** weights[None, :], axis=1)
Q = q[:, None]
# err = abs(Ka * b).std(axis=1).mean()
b = Q / Ka
Kb = K.dot(b)
err = abs(q - qold).max()
if return_log:
log["err"].append(err)
log["a"].append(a)
log["q"].append(q)
log["b"].append(b)
if err < tol and ii > 10:
break
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
if return_log:
return q, log
return q
def _barycenter_inner_1d_np(P, K, qold=None, bold=None, maxiter=1000,
tol=1e-5, weights=None):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
if bold is None:
bold = np.ones_like(P)
b = bold.copy()
if qold is None:
qold = np.ones(dim) / dim
Kb = K.dot(b)
err = 10
if weights is None:
weights = np.ones(n_hists) / n_hists
for ii in range(maxiter):
a = P / Kb
Ka = K.T.dot(a)
q = qold * np.prod((Ka) ** weights[None, :], axis=1)
Q = q[:, None]
err = abs(Ka * b).std(axis=1).mean()
b = Q / Ka
Kb = K.dot(b)
if err < tol and ii > 10:
break
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
return q, b
def _barycenter_inner_1d(P, K, qold=None, bold=None, maxiter=1000,
tol=1e-4, weights=None):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
if bold is None:
bold = torch.ones_like(P)
b = bold.clone()
if qold is None:
qold = torch.ones(dim) / dim
Kb = K.mm(b)
err = 10
if weights is None:
weights = torch.ones(n_hists) / n_hists
q = qold.clone()
for ii in range(maxiter):
qold_inner = q.clone()
a = P / Kb
Ka = K.t().mm(a)
q = qold * torch.prod((Ka) ** weights[None, :], dim=1)
Q = q[:, None]
b = Q / Ka
Kb = K.mm(b)
err = abs(q - qold_inner).max()
if err < tol and ii > 10:
break
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
return q, b
def _barycenter_inner_2d(P, K, qold=None, bold=None, maxiter=1000,
tol=1e-4, weights=None):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
n_hists, width, _ = P.shape
if bold is None:
bold = torch.ones_like(P, requires_grad=False)
b = bold.clone()
Kb = convol_imgs(b, K)
if weights is None:
weights = torch.ones(n_hists, dtype=P.dtype, device=P.device) / n_hists
if qold is None:
qold = torch.ones_like(P[0]) / (width ** 2)
q = qold.clone()
for ii in range(maxiter):
qlocal = q.clone()
a = P / Kb
Ka = convol_imgs(a, K.t())
q = qold * torch.prod(Ka ** weights[:, None, None], dim=0)
Q = q[None, :, :]
b = Q / Ka
Kb = convol_imgs(b, K)
err = abs(q - qlocal).max()
if err < tol and ii > 10:
break
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
return q, b
def barycenter_ref_1d(P, K, maxiter=500, tol=1e-5,
weights=None, return_log=True):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
q = torch.ones(dim) / dim
b = torch.ones_like(P)
for ii in range(maxiter):
qold = q.clone()
q, b = _barycenter_inner_1d(P, K, qold=q, bold=b)
err = abs(q - qold).max()
if err < tol:
break
return q
def barycenter_ref_2d(P, K, maxiter=500, tol=1e-5,
weights=None, return_log=True):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
n_hists, width, _ = P.shape
q = torch.ones((width, width), device=P.device, dtype=P.dtype)
b = torch.ones_like(P)
for ii in range(maxiter):
qold = q.clone()
q, b = _barycenter_inner_2d(P, K, qold=q, bold=b)
err = abs(q - qold).max()
if err < tol:
break
return q
def barycenter_ref_1d_np(P, K, maxiter=500, tol=1e-5,
weights=None, return_log=True):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
dim, n_hists = P.shape
q = np.ones(dim) / dim
b = np.ones_like(P)
for ii in range(maxiter):
qold = q.copy()
q, b = _barycenter_inner_1d_np(P, K, qold=q, bold=b)
err = abs(q - qold).max()
if err < tol:
break
return q
def barycenter_np(P, K, debiased=True, **kwargs):
if debiased:
func = barycenter_np_debiased_1d
else:
func = barycenter_np_1d
return func(P, K, **kwargs)
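# --- Added usage sketch (not part of the original file) ---
# Builds a Gibbs kernel K = exp(-C / eps) on a 1D grid and computes the
# debiased barycenter of two histograms via barycenter(); the grid size,
# epsilon and input histograms are arbitrary demo values.
def _demo_barycenter_1d():
    dim, eps = 64, 1e-2
    x = torch.linspace(0, 1, dim)
    C = (x[:, None] - x[None, :]) ** 2       # squared Euclidean ground cost
    K = torch.exp(-C / eps)                  # Gibbs kernel
    P = torch.zeros(dim, 2)
    P[10:20, 0] = 1.                         # first histogram: a box
    P[40:50, 1] = 1.                         # second histogram: a shifted box
    P = P / P.sum(dim=0, keepdim=True)       # columns sum to one
    return barycenter(P, K, reference="debiased")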
| 29.214159
| 79
| 0.507937
| 2,373
| 16,506
| 3.456384
| 0.061526
| 0.02414
| 0.014509
| 0.024872
| 0.887832
| 0.857108
| 0.842721
| 0.809559
| 0.781151
| 0.776518
| 0
| 0.021638
| 0.333636
| 16,506
| 564
| 80
| 29.265957
| 0.724066
| 0.083364
| 0
| 0.778523
| 0
| 0
| 0.057833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04698
| false
| 0
| 0.006711
| 0
| 0.116331
| 0.002237
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
281a1ca4add71229dc7b87ab2e0255bb29ff30bd
| 175
|
py
|
Python
|
apps/general/views.py
|
cspatgithub/Bloggy
|
1ce62811cc24e0a108cb6b7b445f50f0c123aeb2
|
[
"MIT"
] | null | null | null |
apps/general/views.py
|
cspatgithub/Bloggy
|
1ce62811cc24e0a108cb6b7b445f50f0c123aeb2
|
[
"MIT"
] | 6
|
2021-03-19T02:38:48.000Z
|
2021-09-22T18:57:13.000Z
|
apps/general/views.py
|
cspatgithub/Bloggy
|
1ce62811cc24e0a108cb6b7b445f50f0c123aeb2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def Home(request):
return render(request, 'general/home.html')
def About(request):
return render(request, 'general/about.html')
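# --- Added sketch (hypothetical wiring; not part of the original file) ---
# These views would typically be hooked up in the app's urls.py, e.g.:
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.Home, name='home'),
#       path('about/', views.About, name='about'),
#   ]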
| 19.444444
| 48
| 0.737143
| 23
| 175
| 5.608696
| 0.521739
| 0.20155
| 0.294574
| 0.403101
| 0.511628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 175
| 9
| 48
| 19.444444
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0.198864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
284be3a20d5345d9e1ad61f64e009548c796680b
| 176
|
py
|
Python
|
descarteslabs/workflows/execution/__init__.py
|
descarteslabs/descarteslabs-python
|
efc874d6062603dc424c9646287a9b1f8636e7ac
|
[
"Apache-2.0"
] | 167
|
2017-03-23T22:16:58.000Z
|
2022-03-08T09:19:30.000Z
|
descarteslabs/workflows/execution/__init__.py
|
descarteslabs/descarteslabs-python
|
efc874d6062603dc424c9646287a9b1f8636e7ac
|
[
"Apache-2.0"
] | 93
|
2017-03-23T22:11:40.000Z
|
2021-12-13T18:38:53.000Z
|
descarteslabs/workflows/execution/__init__.py
|
descarteslabs/descarteslabs-python
|
efc874d6062603dc424c9646287a9b1f8636e7ac
|
[
"Apache-2.0"
] | 46
|
2017-03-25T19:12:14.000Z
|
2021-08-15T18:04:29.000Z
|
from .arguments import arguments_to_grafts, promote_arguments
from .to_computable import to_computable
__all__ = ["arguments_to_grafts", "promote_arguments", "to_computable"]
| 35.2
| 71
| 0.835227
| 22
| 176
| 6.090909
| 0.363636
| 0.246269
| 0.253731
| 0.358209
| 0.492537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085227
| 176
| 4
| 72
| 44
| 0.832298
| 0
| 0
| 0
| 0
| 0
| 0.278409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2886d18cb9b0df3bd118dd252967071d9df3ba65
| 145
|
py
|
Python
|
semantic-python/test/fixtures/3-01-empty-class-definition.py
|
Temurson/semantic
|
2e9cd2c006cec9a0328791e47d8c6d60af6d5a1b
|
[
"MIT"
] | 8,844
|
2019-05-31T15:47:12.000Z
|
2022-03-31T18:33:51.000Z
|
semantic-python/test/fixtures/3-01-empty-class-definition.py
|
Qanora/semantic
|
b0eda9a61bbc690a342fb177cfc12eec8c1c001c
|
[
"MIT"
] | 401
|
2019-05-31T18:30:26.000Z
|
2022-03-31T16:32:29.000Z
|
semantic-python/test/fixtures/3-01-empty-class-definition.py
|
Qanora/semantic
|
b0eda9a61bbc690a342fb177cfc12eec8c1c001c
|
[
"MIT"
] | 504
|
2019-05-31T17:55:03.000Z
|
2022-03-30T04:15:04.000Z
|
# CHECK-TREE: { Foo <- rec Foo = __semantic_prelude.type "Foo" __semantic_prelude.object #record {}; #record { Foo: Foo }}
class Foo():
pass
| 36.25
| 122
| 0.668966
| 19
| 145
| 4.789474
| 0.578947
| 0.241758
| 0.395604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 145
| 3
| 123
| 48.333333
| 0.758333
| 0.813793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
953f4fda908710036f4aca3782c38572b035d1f8
| 66,411
|
py
|
Python
|
atom/nucleus/python/nucleus_api/api/insurance_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/insurance_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/insurance_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from nucleus_api.api_client import ApiClient
class InsuranceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_insurance_coverage_using_post(self, insurance_coverage, **kwargs): # noqa: E501
"""Create a insurance coverage request # noqa: E501
Create a new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_insurance_coverage_using_post(insurance_coverage, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceCoverage insurance_coverage: insuranceCoverage (required)
:return: InsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_insurance_coverage_using_post_with_http_info(insurance_coverage, **kwargs) # noqa: E501
else:
(data) = self.create_insurance_coverage_using_post_with_http_info(insurance_coverage, **kwargs) # noqa: E501
return data
def create_insurance_coverage_using_post_with_http_info(self, insurance_coverage, **kwargs): # noqa: E501
"""Create a insurance coverage request # noqa: E501
Create a new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_insurance_coverage_using_post_with_http_info(insurance_coverage, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceCoverage insurance_coverage: insuranceCoverage (required)
:return: InsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_coverage'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_insurance_coverage_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_coverage' is set
if ('insurance_coverage' not in params or
params['insurance_coverage'] is None):
raise ValueError("Missing the required parameter `insurance_coverage` when calling `create_insurance_coverage_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'insurance_coverage' in params:
body_params = params['insurance_coverage']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_coverage', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceCoverage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
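    # --- Added usage sketch (not part of the generated file) ---
    # Assuming a configured ApiClient and an InsuranceCoverage model instance
    # built elsewhere in this SDK, a synchronous and an asynchronous call
    # might look like:
    #   api = InsuranceApi(ApiClient())
    #   created = api.create_insurance_coverage_using_post(insurance_coverage)
    #   thread = api.create_insurance_coverage_using_post(insurance_coverage,
    #                                                     async_req=True)
    #   created = thread.get()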
def create_insurance_discount_using_post(self, insurance_discount, **kwargs): # noqa: E501
"""Create a insurance discount request # noqa: E501
Create a new insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_insurance_discount_using_post(insurance_discount, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceDiscount insurance_discount: insuranceDiscount (required)
:return: InsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_insurance_discount_using_post_with_http_info(insurance_discount, **kwargs) # noqa: E501
else:
(data) = self.create_insurance_discount_using_post_with_http_info(insurance_discount, **kwargs) # noqa: E501
return data
def create_insurance_discount_using_post_with_http_info(self, insurance_discount, **kwargs): # noqa: E501
"""Create a insurance discount request # noqa: E501
Create a new insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_insurance_discount_using_post_with_http_info(insurance_discount, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceDiscount insurance_discount: insuranceDiscount (required)
:return: InsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_discount'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_insurance_discount_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_discount' is set
if ('insurance_discount' not in params or
params['insurance_discount'] is None):
raise ValueError("Missing the required parameter `insurance_discount` when calling `create_insurance_discount_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'insurance_discount' in params:
body_params = params['insurance_discount']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_discount', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceDiscount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_insurance_quote_using_post(self, insurance_quote, **kwargs): # noqa: E501
"""Create a insuranceQuote request # noqa: E501
Create a new insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_insurance_quote_using_post(insurance_quote, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceQuote insurance_quote: insuranceQuote (required)
:return: InsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_insurance_quote_using_post_with_http_info(insurance_quote, **kwargs) # noqa: E501
else:
(data) = self.create_insurance_quote_using_post_with_http_info(insurance_quote, **kwargs) # noqa: E501
return data
def create_insurance_quote_using_post_with_http_info(self, insurance_quote, **kwargs): # noqa: E501
"""Create a insuranceQuote request # noqa: E501
Create a new insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_insurance_quote_using_post_with_http_info(insurance_quote, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceQuote insurance_quote: insuranceQuote (required)
:return: InsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_quote'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_insurance_quote_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_quote' is set
if ('insurance_quote' not in params or
params['insurance_quote'] is None):
raise ValueError("Missing the required parameter `insurance_quote` when calling `create_insurance_quote_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'insurance_quote' in params:
body_params = params['insurance_quote']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_quote', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceQuote', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_insurance_coverage_using_delete(self, insurance_coverage_id, **kwargs): # noqa: E501
"""Delete an insurance coverage request # noqa: E501
Delete an insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_insurance_coverage_using_delete(insurance_coverage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_coverage_id: UUID insurance_coverage_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_insurance_coverage_using_delete_with_http_info(insurance_coverage_id, **kwargs) # noqa: E501
else:
(data) = self.delete_insurance_coverage_using_delete_with_http_info(insurance_coverage_id, **kwargs) # noqa: E501
return data
def delete_insurance_coverage_using_delete_with_http_info(self, insurance_coverage_id, **kwargs): # noqa: E501
"""Delete an insurance coverage request # noqa: E501
Delete an insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_insurance_coverage_using_delete_with_http_info(insurance_coverage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_coverage_id: UUID insurance_coverage_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_coverage_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_insurance_coverage_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_coverage_id' is set
if ('insurance_coverage_id' not in params or
params['insurance_coverage_id'] is None):
raise ValueError("Missing the required parameter `insurance_coverage_id` when calling `delete_insurance_coverage_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_coverage_id' in params:
path_params['insurance_coverage_id'] = params['insurance_coverage_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_coverage/{insurance_coverage_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_insurance_discount_using_delete(self, insurance_discount_id, **kwargs): # noqa: E501
"""Delete an insurance discount request # noqa: E501
Delete an insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_insurance_discount_using_delete(insurance_discount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_discount_id: UUID insurance_discount_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_insurance_discount_using_delete_with_http_info(insurance_discount_id, **kwargs) # noqa: E501
else:
(data) = self.delete_insurance_discount_using_delete_with_http_info(insurance_discount_id, **kwargs) # noqa: E501
return data
def delete_insurance_discount_using_delete_with_http_info(self, insurance_discount_id, **kwargs): # noqa: E501
"""Delete an insurance discount request # noqa: E501
Delete an insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_insurance_discount_using_delete_with_http_info(insurance_discount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_discount_id: UUID insurance_discount_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_discount_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_insurance_discount_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_discount_id' is set
if ('insurance_discount_id' not in params or
params['insurance_discount_id'] is None):
raise ValueError("Missing the required parameter `insurance_discount_id` when calling `delete_insurance_discount_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_discount_id' in params:
path_params['insurance_discount_id'] = params['insurance_discount_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_discount/{insurance_discount_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_insurance_quote_using_delete(self, insurance_quote, insurance_quote_id, **kwargs): # noqa: E501
"""Delete a insuranceQuote request # noqa: E501
Permanently delete a insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_insurance_quote_using_delete(insurance_quote, insurance_quote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_quote: UUID insurance_quote_id (required)
:param str insurance_quote_id: insurance_quote_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_insurance_quote_using_delete_with_http_info(insurance_quote, insurance_quote_id, **kwargs) # noqa: E501
else:
(data) = self.delete_insurance_quote_using_delete_with_http_info(insurance_quote, insurance_quote_id, **kwargs) # noqa: E501
return data
def delete_insurance_quote_using_delete_with_http_info(self, insurance_quote, insurance_quote_id, **kwargs): # noqa: E501
"""Delete a insuranceQuote request # noqa: E501
Permanently delete a insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_insurance_quote_using_delete_with_http_info(insurance_quote, insurance_quote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_quote: UUID insurance_quote_id (required)
:param str insurance_quote_id: insurance_quote_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_quote', 'insurance_quote_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_insurance_quote_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_quote' is set
if ('insurance_quote' not in params or
params['insurance_quote'] is None):
raise ValueError("Missing the required parameter `insurance_quote` when calling `delete_insurance_quote_using_delete`") # noqa: E501
# verify the required parameter 'insurance_quote_id' is set
if ('insurance_quote_id' not in params or
params['insurance_quote_id'] is None):
raise ValueError("Missing the required parameter `insurance_quote_id` when calling `delete_insurance_quote_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_quote' in params:
path_params['insurance_quote'] = params['insurance_quote'] # noqa: E501
if 'insurance_quote_id' in params:
path_params['insurance_quote_id'] = params['insurance_quote_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_quote/{insurance_quote_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
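# --- Hedged usage sketch (illustrative, not generator output) ---
# The delete pair above follows the client's standard calling convention:
# synchronous by default, asynchronous when async_req=True is passed, in
# which case a worker thread is returned and .get() joins it. Assuming
# `api` is an already configured instance of this API class, and noting
# that the spec exposes the quote UUID through two separate required
# parameters:
#
# >>> quote_id = '123e4567-e89b-12d3-a456-426614174000'  # placeholder UUID
# >>> api.delete_insurance_quote_using_delete(quote_id, quote_id)  # blocks
# >>> thread = api.delete_insurance_quote_using_delete(
# ...     quote_id, quote_id, async_req=True)  # returns immediately
# >>> thread.get()  # join the worker thread; raises on HTTP errors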
def get_insurance_coverage_all_using_get(self, **kwargs): # noqa: E501
"""Get all insurance coverage request # noqa: E501
Get all new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_coverage_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageInsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_insurance_coverage_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_insurance_coverage_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_insurance_coverage_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Get all insurance coverage request # noqa: E501
Get all new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_coverage_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageInsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_insurance_coverage_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_coverage', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageInsuranceCoverage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
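# --- Hedged paging sketch (illustrative, not generator output) ---
# The five optional query parameters above drive server-side paging. Assuming
# `api` is a configured instance of this class, and assuming (not confirmed by
# this file) that PageInsuranceCoverage exposes Spring-style `content` and
# `total_pages` attributes:
#
# >>> page = 0
# >>> while True:
# ...     result = api.get_insurance_coverage_all_using_get(
# ...         page=page, size=50, order_by='id', ascending=True)
# ...     handle(result.content)  # `handle` is a hypothetical callback
# ...     page += 1
# ...     if page >= result.total_pages:
# ...         break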
def get_insurance_coverage_using_get(self, insurance_coverage_id, **kwargs): # noqa: E501
"""Get a insurance coverage request # noqa: E501
Get a new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_coverage_using_get(insurance_coverage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_coverage_id: UUID insurance_coverage_id (required)
:return: InsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_insurance_coverage_using_get_with_http_info(insurance_coverage_id, **kwargs) # noqa: E501
else:
(data) = self.get_insurance_coverage_using_get_with_http_info(insurance_coverage_id, **kwargs) # noqa: E501
return data
def get_insurance_coverage_using_get_with_http_info(self, insurance_coverage_id, **kwargs): # noqa: E501
"""Get a insurance coverage request # noqa: E501
Get a new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_coverage_using_get_with_http_info(insurance_coverage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_coverage_id: UUID insurance_coverage_id (required)
:return: InsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_coverage_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_insurance_coverage_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_coverage_id' is set
if ('insurance_coverage_id' not in params or
params['insurance_coverage_id'] is None):
raise ValueError("Missing the required parameter `insurance_coverage_id` when calling `get_insurance_coverage_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_coverage_id' in params:
path_params['insurance_coverage_id'] = params['insurance_coverage_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_coverage/{insurance_coverage_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceCoverage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
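# --- Standalone sketch of the argument-validation idiom used above ---
# Every *_with_http_info method snapshots locals() right after building
# all_params, folds **kwargs into that dict, and rejects unknown keywords.
# Because of the snapshot order, the resulting dict also carries an
# 'all_params' key -- a quirk of the generated code that this mirrors
# (names below are illustrative):
#
# import six
#
# def _collect_params(required_id, **kwargs):
#     all_params = ['required_id', 'async_req', '_request_timeout']
#     params = locals()
#     for key, val in six.iteritems(params['kwargs']):
#         if key not in all_params:
#             raise TypeError(
#                 "Got an unexpected keyword argument '%s'" % key)
#         params[key] = val
#     del params['kwargs']
#     return params
#
# >>> _collect_params('abc', async_req=True)['async_req']
# True
# >>> _collect_params('abc', bogus=1)   # raises TypeError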
def get_insurance_discount_all_using_get(self, **kwargs): # noqa: E501
"""Get all insurance discount request # noqa: E501
Get all new insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_discount_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageInsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_insurance_discount_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_insurance_discount_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_insurance_discount_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Get all insurance discount request # noqa: E501
Get all new insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_discount_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageInsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_insurance_discount_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_discount', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageInsuranceDiscount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_insurance_discount_using_get(self, insurance_discount_id, **kwargs): # noqa: E501
"""Get a insurance discount request # noqa: E501
Get a new insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_discount_using_get(insurance_discount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_discount_id: UUID insurance_discount_id (required)
:return: InsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_insurance_discount_using_get_with_http_info(insurance_discount_id, **kwargs) # noqa: E501
else:
(data) = self.get_insurance_discount_using_get_with_http_info(insurance_discount_id, **kwargs) # noqa: E501
return data
def get_insurance_discount_using_get_with_http_info(self, insurance_discount_id, **kwargs): # noqa: E501
"""Get a insurance discount request # noqa: E501
Get a new insurance discount. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_discount_using_get_with_http_info(insurance_discount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_discount_id: UUID insurance_discount_id (required)
:return: InsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_discount_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_insurance_discount_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_discount_id' is set
if ('insurance_discount_id' not in params or
params['insurance_discount_id'] is None):
raise ValueError("Missing the required parameter `insurance_discount_id` when calling `get_insurance_discount_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_discount_id' in params:
path_params['insurance_discount_id'] = params['insurance_discount_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_discount/{insurance_discount_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceDiscount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_insurance_quote_all_using_get(self, **kwargs): # noqa: E501
"""List all insuranceQuote requests # noqa: E501
Get the information for all insuranceQuote requests. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_quote_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageInsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_insurance_quote_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_insurance_quote_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_insurance_quote_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all insuranceQuote requests # noqa: E501
Get the information for all insuranceQuote requests. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_quote_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageInsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_insurance_quote_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_quote', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageInsuranceQuote', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_insurance_quote_using_get(self, insurance_quote, insurance_quote_id, **kwargs): # noqa: E501
"""Retrieve a insuranceQuote request # noqa: E501
Retrieve the information for a insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_quote_using_get(insurance_quote, insurance_quote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_quote: UUID of the insurance quote (required)
:param str insurance_quote_id: insurance_quote_id (required)
:return: InsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_insurance_quote_using_get_with_http_info(insurance_quote, insurance_quote_id, **kwargs) # noqa: E501
else:
(data) = self.get_insurance_quote_using_get_with_http_info(insurance_quote, insurance_quote_id, **kwargs) # noqa: E501
return data
def get_insurance_quote_using_get_with_http_info(self, insurance_quote, insurance_quote_id, **kwargs): # noqa: E501
"""Retrieve a insuranceQuote request # noqa: E501
Retrieve the information for a insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_insurance_quote_using_get_with_http_info(insurance_quote, insurance_quote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str insurance_quote: UUID of the insurance quote (required)
:param str insurance_quote_id: insurance_quote_id (required)
:return: InsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_quote', 'insurance_quote_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_insurance_quote_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_quote' is set
if ('insurance_quote' not in params or
params['insurance_quote'] is None):
raise ValueError("Missing the required parameter `insurance_quote` when calling `get_insurance_quote_using_get`") # noqa: E501
# verify the required parameter 'insurance_quote_id' is set
if ('insurance_quote_id' not in params or
params['insurance_quote_id'] is None):
raise ValueError("Missing the required parameter `insurance_quote_id` when calling `get_insurance_quote_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_quote' in params:
path_params['insurance_quote'] = params['insurance_quote'] # noqa: E501
if 'insurance_quote_id' in params:
path_params['insurance_quote_id'] = params['insurance_quote_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_quote/{insurance_quote_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceQuote', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_insurance_coverage_using_put(self, insurance_coverage, insurance_coverage_id, **kwargs): # noqa: E501
"""Update a insurance coverage request # noqa: E501
Update a new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_insurance_coverage_using_put(insurance_coverage, insurance_coverage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceCoverage insurance_coverage: insurance_coverage (required)
:param str insurance_coverage_id: UUID insurance_coverage_id (required)
:return: InsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_insurance_coverage_using_put_with_http_info(insurance_coverage, insurance_coverage_id, **kwargs) # noqa: E501
else:
(data) = self.update_insurance_coverage_using_put_with_http_info(insurance_coverage, insurance_coverage_id, **kwargs) # noqa: E501
return data
def update_insurance_coverage_using_put_with_http_info(self, insurance_coverage, insurance_coverage_id, **kwargs): # noqa: E501
"""Update a insurance coverage request # noqa: E501
Update a new insurance coverage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_insurance_coverage_using_put_with_http_info(insurance_coverage, insurance_coverage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceCoverage insurance_coverage: insurance_coverage (required)
:param str insurance_coverage_id: UUID insurance_coverage_id (required)
:return: InsuranceCoverage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_coverage', 'insurance_coverage_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_insurance_coverage_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_coverage' is set
if ('insurance_coverage' not in params or
params['insurance_coverage'] is None):
raise ValueError("Missing the required parameter `insurance_coverage` when calling `update_insurance_coverage_using_put`") # noqa: E501
# verify the required parameter 'insurance_coverage_id' is set
if ('insurance_coverage_id' not in params or
params['insurance_coverage_id'] is None):
raise ValueError("Missing the required parameter `insurance_coverage_id` when calling `update_insurance_coverage_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_coverage_id' in params:
path_params['insurance_coverage_id'] = params['insurance_coverage_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'insurance_coverage' in params:
body_params = params['insurance_coverage']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_coverage/{insurance_coverage_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceCoverage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
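# --- Hedged update sketch (illustrative, not generator output) ---
# Unlike the GET/DELETE operations, the update methods above serialize the
# model argument into a JSON request body (Content-Type: application/json).
# Assuming `api` is a configured instance and InsuranceCoverage is the
# generated model class named in the docstring:
#
# >>> coverage_id = '123e4567-e89b-12d3-a456-426614174000'  # placeholder UUID
# >>> coverage = api.get_insurance_coverage_using_get(coverage_id)
# >>> coverage.name = 'Collision'  # `name` is a hypothetical model attribute
# >>> updated = api.update_insurance_coverage_using_put(coverage, coverage_id)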
def update_insurance_discount_using_put(self, insurance_discount, insurance_discount_id, **kwargs): # noqa: E501
"""Update an insurance discount # noqa: E501
Update an insurance discount record. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_insurance_discount_using_put(insurance_discount, insurance_discount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceDiscount insurance_discount: insurance_discount (required)
:param str insurance_discount_id: UUID insurance_discount_id (required)
:return: InsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_insurance_discount_using_put_with_http_info(insurance_discount, insurance_discount_id, **kwargs) # noqa: E501
else:
(data) = self.update_insurance_discount_using_put_with_http_info(insurance_discount, insurance_discount_id, **kwargs) # noqa: E501
return data
def update_insurance_discount_using_put_with_http_info(self, insurance_discount, insurance_discount_id, **kwargs): # noqa: E501
"""Update an insurance discount # noqa: E501
Update an insurance discount record. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_insurance_discount_using_put_with_http_info(insurance_discount, insurance_discount_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceDiscount insurance_discount: insurance_discount (required)
:param str insurance_discount_id: UUID insurance_discount_id (required)
:return: InsuranceDiscount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_discount', 'insurance_discount_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_insurance_discount_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_discount' is set
if ('insurance_discount' not in params or
params['insurance_discount'] is None):
raise ValueError("Missing the required parameter `insurance_discount` when calling `update_insurance_discount_using_put`") # noqa: E501
# verify the required parameter 'insurance_discount_id' is set
if ('insurance_discount_id' not in params or
params['insurance_discount_id'] is None):
raise ValueError("Missing the required parameter `insurance_discount_id` when calling `update_insurance_discount_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_discount_id' in params:
path_params['insurance_discount_id'] = params['insurance_discount_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'insurance_discount' in params:
body_params = params['insurance_discount']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_discount/{insurance_discount_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceDiscount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_insurance_quote_using_put(self, insurance_quote, insurance_quote_id, **kwargs): # noqa: E501
"""Update a insuranceQuote request # noqa: E501
Update the information for a insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_insurance_quote_using_put(insurance_quote, insurance_quote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceQuote insurance_quote: insurance_quote (required)
:param str insurance_quote_id: UUID insurance_quote_id (required)
:return: InsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_insurance_quote_using_put_with_http_info(insurance_quote, insurance_quote_id, **kwargs) # noqa: E501
else:
(data) = self.update_insurance_quote_using_put_with_http_info(insurance_quote, insurance_quote_id, **kwargs) # noqa: E501
return data
def update_insurance_quote_using_put_with_http_info(self, insurance_quote, insurance_quote_id, **kwargs): # noqa: E501
"""Update a insuranceQuote request # noqa: E501
Update the information for a insuranceQuote request. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_insurance_quote_using_put_with_http_info(insurance_quote, insurance_quote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param InsuranceQuote insurance_quote: insurance_quote (required)
:param str insurance_quote_id: UUID insurance_quote_id (required)
:return: InsuranceQuote
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['insurance_quote', 'insurance_quote_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_insurance_quote_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'insurance_quote' is set
if ('insurance_quote' not in params or
params['insurance_quote'] is None):
raise ValueError("Missing the required parameter `insurance_quote` when calling `update_insurance_quote_using_put`") # noqa: E501
# verify the required parameter 'insurance_quote_id' is set
if ('insurance_quote_id' not in params or
params['insurance_quote_id'] is None):
raise ValueError("Missing the required parameter `insurance_quote_id` when calling `update_insurance_quote_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'insurance_quote_id' in params:
path_params['insurance_quote_id'] = params['insurance_quote_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'insurance_quote' in params:
body_params = params['insurance_quote']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/insurance_quote/{insurance_quote_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InsuranceQuote', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 42.571154
| 154
| 0.637545
| 7,524
| 66,411
| 5.313131
| 0.023126
| 0.047228
| 0.024415
| 0.027016
| 0.987543
| 0.982164
| 0.977436
| 0.97526
| 0.96678
| 0.954898
| 0
| 0.015288
| 0.279005
| 66,411
| 1,559
| 155
| 42.598461
| 0.819598
| 0.326934
| 0
| 0.834721
| 1
| 0
| 0.212818
| 0.082795
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036861
| false
| 0
| 0.004756
| 0
| 0.096314
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
958d1e3cedfbb52632717dd0096fa6230ab0d676
| 2,014
|
py
|
Python
|
src/uselessfacts/main.py
|
Jakeisbored/uselessfacts
|
a79e2ba0d99959ef20dff734d7a98697f0157c53
|
[
"Apache-2.0"
] | null | null | null |
src/uselessfacts/main.py
|
Jakeisbored/uselessfacts
|
a79e2ba0d99959ef20dff734d7a98697f0157c53
|
[
"Apache-2.0"
] | null | null | null |
src/uselessfacts/main.py
|
Jakeisbored/uselessfacts
|
a79e2ba0d99959ef20dff734d7a98697f0157c53
|
[
"Apache-2.0"
] | null | null | null |
import requests

formats = ['html', 'md', 'txt', 'json']
languages = ['en', 'de']


def random_fact(format=None, language=None):
    """Fetch a random fact; `format` defaults to 'json', `language` to 'en'."""
    if format is None:
        format = 'json'
    if format not in formats:
        raise ValueError(f'Invalid format, should be one of {formats}')
    if language is None:
        language = 'en'
    elif language not in languages:
        raise ValueError(f'Invalid language, should be one of {languages}')
    r = requests.get('https://uselessfacts.jsph.pl/random.{}?language={}'.format(format, language))
    return {
        'status_code': r.status_code,
        'response': r.json() if format == 'json' else r.text,
    }


def daily_fact(format=None, language=None):
    """Fetch today's fact; `format` defaults to 'json', `language` to 'en'."""
    if format is None:
        format = 'json'
    if format not in formats:
        raise ValueError(f'Invalid format, should be one of {formats}')
    if language is None:
        language = 'en'
    elif language not in languages:
        raise ValueError(f'Invalid language, should be one of {languages}')
    r = requests.get('https://uselessfacts.jsph.pl/today.{}?language={}'.format(format, language))
    return {
        'status_code': r.status_code,
        'response': r.json() if format == 'json' else r.text,
    }


def get_fact(id=None, format=None):
    """Fetch a specific fact by id; `format` defaults to 'json'."""
    if id is None:
        raise ValueError('You must provide an id')
    if format is None:
        format = 'json'
    if format not in formats:
        raise ValueError(f'Invalid format, should be one of {formats}')
    try:
        r = requests.get('https://uselessfacts.jsph.pl/{}.{}'.format(id, format))
    except Exception as error:
        raise Exception('Fact lookup failed') from error
    return {
        'status_code': r.status_code,
        'response': r.json() if format == 'json' else r.text,
    }
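# Example usage of the helpers above (requires network access; nothing about
# the endpoint beyond what the code itself encodes is assumed):
if __name__ == '__main__':
    fact = random_fact(language='en')   # JSON format by default
    print(fact['status_code'])
    print(fact['response'])
    print(daily_fact(format='txt')['response'])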
| 32.483871
| 98
| 0.632075
| 260
| 2,014
| 4.784615
| 0.207692
| 0.078778
| 0.060289
| 0.052251
| 0.860129
| 0.860129
| 0.860129
| 0.860129
| 0.829582
| 0.829582
| 0
| 0
| 0.217478
| 2,014
| 61
| 99
| 33.016393
| 0.78934
| 0
| 0
| 0.75
| 0
| 0
| 0.258689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.016667
| 0
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
95cd88b7a4d70b7dee36b3146e5819bdc87f669b
| 9,892
|
py
|
Python
|
dotmotif/executors/test_grandisoexecutor.py
|
aplbrain/dotmotif
|
db093ddad7308756e9cf7ee01199f0dca1369872
|
[
"Apache-2.0"
] | 28
|
2020-06-12T20:46:15.000Z
|
2022-02-05T18:33:46.000Z
|
dotmotif/executors/test_grandisoexecutor.py
|
aplbrain/dotmotif
|
db093ddad7308756e9cf7ee01199f0dca1369872
|
[
"Apache-2.0"
] | 26
|
2020-06-09T20:09:32.000Z
|
2022-02-01T18:22:20.000Z
|
dotmotif/executors/test_grandisoexecutor.py
|
aplbrain/dotmotif
|
db093ddad7308756e9cf7ee01199f0dca1369872
|
[
"Apache-2.0"
] | 4
|
2021-03-08T02:47:49.000Z
|
2021-09-13T19:16:29.000Z
|
import unittest
import dotmotif
from dotmotif import Motif
from dotmotif.executors import GrandIsoExecutor
from dotmotif.executors.NetworkXExecutor import (
_edge_satisfies_constraints,
_node_satisfies_constraints,
)
from dotmotif.parsers.v2 import ParserV2
import networkx as nx
class TestSmallMotifs(unittest.TestCase):
def test_edgecount_motif(self):
dm = Motif("""A->B""")
H = nx.DiGraph()
H.add_edge("x", "y")
self.assertEqual(len(GrandIsoExecutor(graph=H).find(dm)), 1)
H.add_edge("x", "y")
self.assertEqual(len(GrandIsoExecutor(graph=H).find(dm)), 1)
H.add_edge("x", "z")
self.assertEqual(len(GrandIsoExecutor(graph=H).find(dm)), 2)
def test_fullyconnected_triangle_motif(self):
dm = Motif(
"""
A->B
B->C
C->A
"""
)
H = nx.DiGraph()
H.add_edge("x", "y")
self.assertEqual(len(GrandIsoExecutor(graph=H).find(dm)), 0)
H.add_edge("y", "z")
self.assertEqual(len(GrandIsoExecutor(graph=H).find(dm)), 0)
H.add_edge("z", "x")
self.assertEqual(len(GrandIsoExecutor(graph=H).find(dm)), 3)
def test_edge_attribute_equality(self):
dm = Motif(
"""
A->B [weight==10, area==4]
"""
)
H = nx.DiGraph()
H.add_edge("z", "x", weight=10, area=4)
H.add_edge("x", "y")
H.add_edge("y", "z", weight=5)
self.assertEqual(len(GrandIsoExecutor(graph=H).find(dm)), 1)
def test_one_instance(self):
H = nx.DiGraph()
H.add_edge("x", "y", weight=1)
H.add_edge("y", "z", weight=10)
H.add_edge("z", "x", weight=5)
motif = dotmotif.Motif(
"""
A -> B [weight>=11]
""".strip()
)
self.assertEqual(len(GrandIsoExecutor(graph=H).find(motif)), 0)
def test_two_instance(self):
H = nx.DiGraph()
H.add_edge("x", "y", weight=1)
H.add_edge("y", "z", weight=10)
H.add_edge("z", "x", weight=5)
H.add_edge("z", "a", weight=5)
H.add_edge("a", "b", weight=1)
H.add_edge("b", "c", weight=10)
H.add_edge("c", "a", weight=5)
motif = dotmotif.Motif(
"""
A -> B [weight>=7]
""".strip()
)
self.assertEqual(len(GrandIsoExecutor(graph=H).find(motif)), 2)
def test_triangle_two_instance(self):
H = nx.DiGraph()
H.add_edge("x", "y", weight=1)
H.add_edge("y", "z", weight=10)
H.add_edge("z", "x", weight=5)
H.add_edge("z", "a", weight=5)
H.add_edge("a", "b", weight=1)
H.add_edge("b", "c", weight=10)
H.add_edge("c", "a", weight=5)
motif = dotmotif.Motif(
"""
A -> B [weight>=7]
B -> C
C -> A
""".strip()
)
self.assertEqual(len(GrandIsoExecutor(graph=H).find(motif)), 2)
def test_mini_example(self):
H = nx.DiGraph()
H.add_edge("y", "x", ATTRIBUTE=7)
H.add_edge("y", "z", ATTRIBUTE=7)
motif = dotmotif.Motif(
"""
A -> B [ATTRIBUTE>=7]
""".strip()
)
self.assertEqual(len(GrandIsoExecutor(graph=H).find(motif)), 2)
def test_node_and_edge_full_example(self):
H = nx.DiGraph()
H.add_edge("X", "Y", weight=10)
H.add_edge("Y", "Z", weight=9)
H.add_edge("Z", "X", weight=8)
motif = dotmotif.Motif(
"""
A -> B [weight>=7]
""".strip()
)
res = GrandIsoExecutor(graph=H).find(motif)
self.assertEqual(len(res), 3)
H.add_edge("Z", "C", weight=7)
res = GrandIsoExecutor(graph=H).find(motif)
self.assertEqual(len(res), 4)
H.add_edge("Z", "D", weight="no")
res = GrandIsoExecutor(graph=H).find(motif)
self.assertEqual(len(res), 4)
H.add_edge("y", "a")
self.assertEqual(len(GrandIsoExecutor(graph=H).find(motif)), 4)
H.add_edge("y", "a", other_weight=7, weight=8)
self.assertEqual(len(GrandIsoExecutor(graph=H).find(motif)), 5)
def test_automorphism_reduction(self):
G = nx.DiGraph()
G.add_edge("X", "Z")
G.add_edge("Y", "Z")
motif = dotmotif.Motif(
"""
A -> C
B -> C
A === B
"""
)
res = GrandIsoExecutor(graph=G).find(motif)
self.assertEqual(len(res), 2)
motif = dotmotif.Motif(exclude_automorphisms=True).from_motif(
"""
A -> C
B -> C
A === B
"""
)
res = GrandIsoExecutor(graph=G).find(motif)
self.assertEqual(len(res), 1)
def test_automorphism_auto(self):
G = nx.DiGraph()
G.add_edge("X", "Z")
G.add_edge("Y", "Z")
motif = dotmotif.Motif(exclude_automorphisms=True).from_motif(
"""
A -> C
B -> C
"""
)
res = GrandIsoExecutor(graph=G).find(motif)
self.assertEqual(len(res), 1)
def test_automorphism_notauto(self):
G = nx.DiGraph()
G.add_edge("X", "Z")
G.add_edge("Y", "Z")
motif = dotmotif.Motif(
"""
A -> C
B -> C
"""
)
res = GrandIsoExecutor(graph=G).find(motif)
self.assertEqual(len(res), 2)
def test_automorphism_flag_triangle(self):
G = nx.DiGraph()
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
motif = dotmotif.Motif(
"""
A -> B
B -> C
C -> A
"""
)
res = GrandIsoExecutor(graph=G).find(motif)
self.assertEqual(len(res), 3)
motif = dotmotif.Motif(exclude_automorphisms=True).from_motif(
"""
A -> B
B -> C
C -> A
"""
)
res = GrandIsoExecutor(graph=G).find(motif)
self.assertEqual(len(res), 1)
class TestDynamicNodeConstraints(unittest.TestCase):
def test_dynamic_constraints_zero_results(self):
"""
Test that comparisons may be made between variables, e.g.:
A.type != B.type
"""
G = nx.DiGraph()
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
G.add_node("A", radius=5)
G.add_node("B", radius=10)
exp = """\
A -> B
A.radius > B.radius
"""
dm = dotmotif.Motif(parser=ParserV2)
res = GrandIsoExecutor(graph=G).find(dm.from_motif(exp))
self.assertEqual(len(res), 0)
def test_dynamic_constraints_one_result(self):
"""
Test that comparisons may be made between variables, e.g.:
A.type != B.type
"""
G = nx.DiGraph()
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
G.add_node("A", radius=25)
G.add_node("B", radius=10)
exp = """\
A -> B
A.radius > B.radius
"""
dm = dotmotif.Motif(parser=ParserV2)
res = GrandIsoExecutor(graph=G).find(dm.from_motif(exp))
self.assertEqual(len(res), 1)
def test_dynamic_constraints_two_results(self):
"""
Test that comparisons may be made between variables, e.g.:
A.type != B.type
"""
G = nx.DiGraph()
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
G.add_node("A", radius=25)
G.add_node("B", radius=10)
G.add_node("C", radius=5)
exp = """\
A -> B
A.radius > B.radius
"""
dm = dotmotif.Motif(parser=ParserV2)
res = GrandIsoExecutor(graph=G).find(dm.from_motif(exp))
self.assertEqual(len(res), 2)
def test_dynamic_constraints_in_macros_zero_results(self):
"""
Test that comparisons may be made between variables, e.g.:
A.type != B.type
"""
G = nx.DiGraph()
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
G.add_node("A", radius=5)
G.add_node("B", radius=10)
exp = """\
macro(A, B) {
A.radius > B.radius
}
macro(A, B)
A -> B
"""
dm = dotmotif.Motif(parser=ParserV2)
res = GrandIsoExecutor(graph=G).find(dm.from_motif(exp))
self.assertEqual(len(res), 0)
def test_dynamic_constraints_in_macros_one_result(self):
"""
Test that comparisons may be made between variables, e.g.:
A.type != B.type
"""
G = nx.DiGraph()
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
G.add_node("A", radius=15)
G.add_node("B", radius=10)
exp = """\
macro(A, B) {
A.radius > B.radius
}
macro(A, B)
A -> B
"""
dm = dotmotif.Motif(parser=ParserV2)
res = GrandIsoExecutor(graph=G).find(dm.from_motif(exp))
self.assertEqual(len(res), 1)
def test_dynamic_constraints_in_macros_two_result(self):
"""
Test that comparisons may be made between variables, e.g.:
A.type != B.type
"""
G = nx.DiGraph()
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
G.add_node("A", radius=15)
G.add_node("B", radius=10)
G.add_node("C", radius=10)
exp = """\
macro(A, B) {
A.radius >= B.radius
}
macro(A, B)
A -> B
"""
dm = dotmotif.Motif(parser=ParserV2)
res = GrandIsoExecutor(graph=G).find(dm.from_motif(exp))
self.assertEqual(len(res), 2)
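# A compact standalone illustration of the workflow the tests above exercise:
# build a host graph, declare a motif, and count matches with GrandIsoExecutor.
# The library calls mirror those used in the tests; the graph data is made up.
if __name__ == "__main__":
    host = nx.DiGraph()
    host.add_edge("a", "b", weight=9)
    host.add_edge("b", "c", weight=3)
    feedforward = Motif("""A -> B [weight>=5]""")
    # Only the a->b edge satisfies the weight constraint, so exactly one match.
    print(len(GrandIsoExecutor(graph=host).find(feedforward)))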
| 26.449198
| 71
| 0.503134
| 1,267
| 9,892
| 3.805051
| 0.078137
| 0.090023
| 0.058079
| 0.086289
| 0.870774
| 0.851898
| 0.819125
| 0.812694
| 0.763327
| 0.722879
| 0
| 0.015887
| 0.331884
| 9,892
| 373
| 72
| 26.520107
| 0.713572
| 0.046603
| 0
| 0.710084
| 0
| 0
| 0.074551
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.07563
| false
| 0
| 0.029412
| 0
| 0.113445
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
667fdb0824d7d709028cb1c834695fba8cb0946e
| 24,515
|
py
|
Python
|
baoming/webapp/controller/search/administrator.py
|
hanxiaoshun/RegistrationSystem
|
2f7310508fc1725e96fe941b1062ce7f26f265a4
|
[
"Apache-2.0"
] | null | null | null |
baoming/webapp/controller/search/administrator.py
|
hanxiaoshun/RegistrationSystem
|
2f7310508fc1725e96fe941b1062ce7f26f265a4
|
[
"Apache-2.0"
] | 14
|
2020-06-06T01:24:24.000Z
|
2022-03-12T00:17:22.000Z
|
baoming/webapp/controller/search/administrator.py
|
hanxiaoshun/RegistrationSystem
|
2f7310508fc1725e96fe941b1062ce7f26f265a4
|
[
"Apache-2.0"
] | null | null | null |
import json
from django.core.paginator import Paginator
from ..renderUtil import render_result
from .search_common import *
from .search_param_deal import search_return, search_parameter
from webapp.utils.date_encoder import *
sys_msg = '报名系统'  # i.e. 'registration system'
result = {'status': True, 'message': ''}
def administrator_search_chemical(request):
"""
化工类待条件查询
:param request:
:return:
"""
title_msg = '化工类学员'
param_result = search_parameter(request)
if 'school_term' in param_result:
if param_result['school_term'] is None:
message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'user_search_error_class' in param_result:
if param_result['user_search_error_class'] is not None:
print(param_result['user_search_error_class'], param_result['user_search_errors'])
message = '系统提示:参数传输错误:' + param_result['user_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'student_search_error_class' in param_result:
if param_result['student_search_error_class'] is not None:
print(param_result['student_search_error_class'], param_result['student_search_errors'])
message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
else:
report_skill_main_list = param_result['report_skill_main_list']
report_skill_list = param_result['report_skill_list']
tmp_list = param_result['tmp_list']
last_school_term = param_result['last_school_term']
student_info = param_result['student_info']
contacts = param_result['contacts']
teacher_infos = param_result['teacher_infos']
school_terms = param_result['school_terms']
school_term = param_result['school_term']
teacher_info = param_result['teacher_info']
identification_level = param_result['identification_level']
report_skill_main = param_result['report_skill_main']
report_skill = param_result['report_skill']
page_result = {'title_msg': title_msg,
'need_login': False,
'report_skill_main_list':report_skill_main_list,
'report_skill_list':report_skill_list,
'tmp_list': json.dumps(tmp_list, ensure_ascii=False),
'last_school_term': last_school_term, 'student_info': student_info,
'contacts': contacts, 'teacher_infos': teacher_infos,
'school_terms': school_terms, 'school_term': school_term,
'teacher_info': teacher_info,
'identification_level': identification_level,
'report_skill_main':report_skill_main,'report_skill':report_skill}
if param_result:
return render_result(request,
"page_main_controller/administrator/reporter_chemical.html",
page_result)
def administrator_search_chemical_not(request):
"""
(非化工类)化工类待条件查询
:param request:
:return:
"""
title_msg = "学员报名资料(非化工)"
param_result = search_parameter(request)
if 'school_term' in param_result:
if param_result['school_term'] is None:
message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'user_search_error_class' in param_result:
if param_result['user_search_error_class'] is not None:
print(param_result['user_search_error_class'], param_result['user_search_errors'])
message = '系统提示:参数传输错误:' + param_result['user_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'student_search_error_class' in param_result:
if param_result['student_search_error_class'] is not None:
print(param_result['student_search_error_class'], param_result['student_search_errors'])
message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
else:
report_skill_main_list = param_result['report_skill_main_list']
report_skill_list = param_result['report_skill_list']
tmp_list = param_result['tmp_list']
last_school_term = param_result['last_school_term']
student_info = param_result['student_info']
contacts = param_result['contacts']
teacher_infos = param_result['teacher_infos']
school_terms = param_result['school_terms']
school_term = param_result['school_term']
teacher_info = param_result['teacher_info']
identification_level = param_result['identification_level']
report_skill_main = param_result['report_skill_main']
report_skill = param_result['report_skill']
page_result = {'title_msg': title_msg,
'need_login': False,
'report_skill_main_list':report_skill_main_list,
'report_skill_list':report_skill_list,
'tmp_list': json.dumps(tmp_list, ensure_ascii=False),
'last_school_term': last_school_term, 'student_info': student_info,
'contacts': contacts, 'teacher_infos': teacher_infos,
'school_terms': school_terms, 'school_term': school_term,
'teacher_info': teacher_info,
'identification_level': identification_level,
'report_skill_main':report_skill_main,'report_skill':report_skill}
if param_result:
return render_result(request,
"page_main_controller/administrator/reporter_chemical_not.html",
page_result)
def administrator_search_all_student(request):
"""
所有学员的条件查询
:param request:
:return:
"""
title_msg = '查询所有学员'
param_result = search_parameter(request, 'all_student')
if 'school_term' in param_result:
if param_result['school_term'] is None:
message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'user_search_error_class' in param_result:
if param_result['user_search_error_class'] is not None:
print(param_result['user_search_error_class'], param_result['user_search_errors'])
message = '系统提示:参数传输错误:' + param_result['user_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'student_search_error_class' in param_result:
if param_result['student_search_error_class'] is not None:
print(param_result['student_search_error_class'], param_result['student_search_errors'])
message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
else:
report_skill_main_list = param_result['report_skill_main_list']
report_skill_list = param_result['report_skill_list']
tmp_list = param_result['tmp_list']
last_school_term = param_result['last_school_term']
student_info = param_result['student_info']
contacts = param_result['contacts']
teacher_infos = param_result['teacher_infos']
school_terms = param_result['school_terms']
school_term = param_result['school_term']
teacher_info = param_result['teacher_info']
identification_level = param_result['identification_level']
report_skill_main = param_result['report_skill_main']
report_skill = param_result['report_skill']
page_result = {'title_msg': title_msg,
'need_login': False,
'report_skill_main_list':report_skill_main_list,
'report_skill_list':report_skill_list,
'tmp_list': json.dumps(tmp_list, ensure_ascii=False),
'last_school_term': last_school_term, 'student_info': student_info,
'contacts': contacts, 'teacher_infos': teacher_infos,
'school_terms': school_terms, 'school_term': school_term,
'teacher_info': teacher_info,
'identification_level': identification_level,
'report_skill_main':report_skill_main,'report_skill':report_skill,'no_term': True}
if param_result:
return render_result(request,
"page_main_controller/administrator/all_student_base_info.html",
page_result)
def administrator_search_wait_confirm(request):
"""
待确认学生信息列表
:param request:
:return:
"""
title_msg = '学生填报信息待确认列表'
param_result = search_parameter(request, 'wait_confirm')
if 'school_term' in param_result:
if param_result['school_term'] is None:
message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'user_search_error_class' in param_result:
if param_result['user_search_error_class'] is not None:
print(param_result['user_search_error_class'], param_result['user_search_errors'])
message = '系统提示:参数传输错误:' + param_result['user_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'student_search_error_class' in param_result:
if param_result['student_search_error_class'] is not None:
print(param_result['student_search_error_class'], param_result['student_search_errors'])
message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
else:
report_skill_main_list = param_result['report_skill_main_list']
report_skill_list = param_result['report_skill_list']
tmp_list = param_result['tmp_list']
last_school_term = param_result['last_school_term']
student_info = param_result['student_info']
contacts = param_result['contacts']
teacher_infos = param_result['teacher_infos']
school_terms = param_result['school_terms']
school_term = param_result['school_term']
teacher_info = param_result['teacher_info']
identification_level = param_result['identification_level']
report_skill_main = param_result['report_skill_main']
report_skill = param_result['report_skill']
page_result = {'title_msg': title_msg,
'need_login': False,
'report_skill_main_list':report_skill_main_list,
'report_skill_list':report_skill_list,
'tmp_list': json.dumps(tmp_list, ensure_ascii=False),
'last_school_term': last_school_term, 'student_info': student_info,
'contacts': contacts, 'teacher_infos': teacher_infos,
'school_terms': school_terms, 'school_term': school_term,
'teacher_info': teacher_info,
'identification_level': identification_level,
'report_skill_main':report_skill_main,
'report_skill':report_skill,
'no_term': True}
if param_result:
return render_result(request,
"page_main_controller/administrator/report_student_info_list_admin.html",
page_result)
def administrator_reporter_electronic_communication(request):
"""
电子通信类
:param request:
:return:
"""
param_result = search_parameter(request, 'electronic_communication')
if 'school_term' in param_result:
if param_result['school_term'] is None:
message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'user_search_error_class' in param_result:
if param_result['user_search_error_class'] is not None:
print(param_result['user_search_error_class'], param_result['user_search_errors'])
message = '系统提示:参数传输错误:' + param_result['user_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'student_search_error_class' in param_result:
if param_result['student_search_error_class'] is not None:
print(param_result['student_search_error_class'], param_result['student_search_errors'])
message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
else:
report_skill_main_list = param_result['report_skill_main_list']
report_skill_list = param_result['report_skill_list']
tmp_list = param_result['tmp_list']
last_school_term = param_result['last_school_term']
student_info = param_result['student_info']
contacts = param_result['contacts']
teacher_infos = param_result['teacher_infos']
school_terms = param_result['school_terms']
school_term = param_result['school_term']
teacher_info = param_result['teacher_info']
identification_level = param_result['identification_level']
report_skill_main = param_result['report_skill_main']
report_skill = param_result['report_skill']
        page_result = {'title_msg': title_msg,
                       'need_login': False,
                       'report_skill_main_list': report_skill_main_list,
                       'report_skill_list': report_skill_list,
                       'tmp_list': json.dumps(tmp_list, ensure_ascii=False),
                       'last_school_term': last_school_term, 'student_info': student_info,
                       'contacts': contacts, 'teacher_infos': teacher_infos,
                       'school_terms': school_terms, 'school_term': school_term,
                       'teacher_info': teacher_info,
                       'identification_level': identification_level,
                       'report_skill_main': report_skill_main, 'report_skill': report_skill}
if param_result:
return render_result(request,
"page_main_controller/administrator/reporter_electronic_communication.html",
page_result)
def administrator_reporter_spin(request):
"""
    Textiles category
:param request:
:return:
"""
title_msg = '查询所有纺织大类学员'
param_result = search_parameter(request, 'spin')
if 'school_term' in param_result:
if param_result['school_term'] is None:
message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'user_search_error_class' in param_result:
if param_result['user_search_error_class'] is not None:
print(param_result['user_search_error_class'], param_result['user_search_errors'])
message = '系统提示:参数传输错误:' + param_result['user_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'student_search_error_class' in param_result:
if param_result['student_search_error_class'] is not None:
print(param_result['student_search_error_class'], param_result['student_search_errors'])
message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
else:
report_skill_main_list = param_result['report_skill_main_list']
report_skill_list = param_result['report_skill_list']
tmp_list = param_result['tmp_list']
last_school_term = param_result['last_school_term']
student_info = param_result['student_info']
contacts = param_result['contacts']
teacher_infos = param_result['teacher_infos']
school_terms = param_result['school_terms']
school_term = param_result['school_term']
teacher_info = param_result['teacher_info']
identification_level = param_result['identification_level']
report_skill_main = param_result['report_skill_main']
report_skill = param_result['report_skill']
        page_result = {'title_msg': title_msg,
                       'need_login': False,
                       'report_skill_main_list': report_skill_main_list,
                       'report_skill_list': report_skill_list,
                       'tmp_list': json.dumps(tmp_list, ensure_ascii=False),
                       'last_school_term': last_school_term, 'student_info': student_info,
                       'contacts': contacts, 'teacher_infos': teacher_infos,
                       'school_terms': school_terms, 'school_term': school_term,
                       'teacher_info': teacher_info,
                       'identification_level': identification_level,
                       'report_skill_main': report_skill_main, 'report_skill': report_skill}
if param_result:
return render_result(request,
"page_main_controller/administrator/reporter_spin.html",
page_result)
def administrator_worker_years_6(request):
"""
    List of personnel with 6 or more years (inclusive) of work experience
:param request:
:return:
"""
title_msg = '工作满6年(含)以上人员名单'
param_result = search_parameter(request, '')
if 'school_term' in param_result:
if param_result['school_term'] is None:
message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'user_search_error_class' in param_result:
if param_result['user_search_error_class'] is not None:
print(param_result['user_search_error_class'], param_result['user_search_errors'])
message = '系统提示:参数传输错误:' + param_result['user_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
elif 'student_search_error_class' in param_result:
if param_result['student_search_error_class'] is not None:
print(param_result['student_search_error_class'], param_result['student_search_errors'])
message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
return render_result(request, "page_main_controller/message.html",
{'title_msg': title_msg, 'message': message})
else:
report_skill_main_list = param_result['report_skill_main_list']
report_skill_list = param_result['report_skill_list']
tmp_list = param_result['tmp_list']
last_school_term = param_result['last_school_term']
student_info = param_result['student_info']
contacts = param_result['contacts']
teacher_infos = param_result['teacher_infos']
school_terms = param_result['school_terms']
school_term = param_result['school_term']
teacher_info = param_result['teacher_info']
identification_level = param_result['identification_level']
report_skill_main = param_result['report_skill_main']
report_skill = param_result['report_skill']
        page_result = {'title_msg': title_msg,
                       'need_login': False,
                       'report_skill_main_list': report_skill_main_list,
                       'report_skill_list': report_skill_list,
                       'tmp_list': json.dumps(tmp_list, ensure_ascii=False),
                       'last_school_term': last_school_term, 'student_info': student_info,
                       'contacts': contacts, 'teacher_infos': teacher_infos,
                       'school_terms': school_terms, 'school_term': school_term,
                       'teacher_info': teacher_info,
                       'identification_level': identification_level,
                       'report_skill_main': report_skill_main, 'report_skill': report_skill}
if param_result:
return render_result(request,
"page_main_controller/administrator/reporter_worker_years_6.html",
page_result)
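# The views above share the same error handling and context assembly. A minimal
# refactoring sketch, assuming `search_parameter`/`render_result` behave as used
# above; the helper name `_render_report_page` is hypothetical, not part of the
# original module:
def _render_report_page(request, category, title_msg, template, extra=None):
    param_result = search_parameter(request, category)
    if 'school_term' in param_result and param_result['school_term'] is None:
        message = '尚未添加->报考学期->信息,请->管理员->添加相关信息'
    elif param_result.get('user_search_error_class') is not None:
        message = '系统提示:参数传输错误:' + param_result['user_search_errors']
    elif param_result.get('student_search_error_class') is not None:
        message = '系统提示:获取当前用户信息失败:' + param_result['student_search_errors']
    else:
        keys = ('report_skill_main_list', 'report_skill_list', 'last_school_term',
                'student_info', 'contacts', 'teacher_infos', 'school_terms',
                'school_term', 'teacher_info', 'identification_level',
                'report_skill_main', 'report_skill')
        page_result = {key: param_result[key] for key in keys}
        page_result['tmp_list'] = json.dumps(param_result['tmp_list'], ensure_ascii=False)
        page_result.update({'title_msg': title_msg, 'need_login': False})
        page_result.update(extra or {})
        return render_result(request, template, page_result)
    return render_result(request, "page_main_controller/message.html",
                         {'title_msg': title_msg, 'message': message})
# e.g.:
#   def administrator_reporter_spin(request):
#       return _render_report_page(request, 'spin', '查询所有纺织大类学员',
#           "page_main_controller/administrator/reporter_spin.html")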
| 59.792683
| 120
| 0.572303
| 2,417
| 24,515
| 5.353331
| 0.043442
| 0.160677
| 0.06492
| 0.0541
| 0.949996
| 0.929438
| 0.909962
| 0.909962
| 0.909962
| 0.909962
| 0
| 0.000248
| 0.34228
| 24,515
| 410
| 121
| 59.792683
| 0.80222
| 0.009912
| 0
| 0.892351
| 0
| 0
| 0.265273
| 0.123968
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01983
| false
| 0
| 0.016997
| 0
| 0.116147
| 0.03966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dd71b0fb8d569e7cf2ee675f50d1aa265788903f
| 146
|
py
|
Python
|
netbox_graphql/tests/utils.py
|
ninech/django-netbox-graphql
|
8383570bdf3a8ce8d9d912c5b8f7b053b31c7363
|
[
"MIT"
] | 17
|
2017-08-17T02:38:09.000Z
|
2022-01-05T15:36:20.000Z
|
netbox_graphql/tests/utils.py
|
ninech/django-netbox-graphql
|
8383570bdf3a8ce8d9d912c5b8f7b053b31c7363
|
[
"MIT"
] | 2
|
2017-09-13T14:53:56.000Z
|
2018-02-08T14:06:54.000Z
|
netbox_graphql/tests/utils.py
|
ninech/django-netbox-graphql
|
8383570bdf3a8ce8d9d912c5b8f7b053b31c7363
|
[
"MIT"
] | 2
|
2020-03-04T11:51:10.000Z
|
2021-03-11T19:24:37.000Z
|
from graphql_relay.node.node import from_global_id, to_global_id
def obj_to_global_id(obj):
return to_global_id(type(obj).__name__, obj.id)
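# Usage sketch: `to_global_id` pairs with `from_global_id` (imported above) to
# round-trip a type name and primary key; the `circuit` object is hypothetical.
#
#   gid = obj_to_global_id(circuit)       # e.g. base64 encoding of 'Circuit:1'
#   type_name, pk = from_global_id(gid)   # -> ('Circuit', '1')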
| 24.333333
| 64
| 0.80137
| 27
| 146
| 3.814815
| 0.481481
| 0.31068
| 0.291262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 146
| 5
| 65
| 29.2
| 0.792308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
06e442cf2393ab3f5c198839ce7144b9e59bfe3b
| 232
|
py
|
Python
|
{{cookiecutter.repository_name}}/{{cookiecutter.package_name}}/model/__init__.py
|
Aiwizo/pytorch-lantern-template
|
cc20b82ac91d0291c0c981f4afc87ca4c6422207
|
[
"Apache-2.0"
] | 1
|
2021-04-13T14:13:16.000Z
|
2021-04-13T14:13:16.000Z
|
{{cookiecutter.repository_name}}/{{cookiecutter.package_name}}/model/__init__.py
|
Aiwizo/pytorch-lantern-template
|
cc20b82ac91d0291c0c981f4afc87ca4c6422207
|
[
"Apache-2.0"
] | 10
|
2020-12-17T07:26:29.000Z
|
2021-08-13T07:43:17.000Z
|
{{cookiecutter.repository_name}}/{{cookiecutter.package_name}}/model/__init__.py
|
Aiwizo/pytorch-lantern-template
|
cc20b82ac91d0291c0c981f4afc87ca4c6422207
|
[
"Apache-2.0"
] | null | null | null |
from {{cookiecutter.package_name}}.model.standardized_image import StandardizedImage
from {{cookiecutter.package_name}}.model.prediction import Prediction, PredictionBatch
from {{cookiecutter.package_name}}.model.model import Model
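# Note: the `{{cookiecutter.package_name}}` segments are Jinja placeholders that
# cookiecutter substitutes when the project template is rendered; the file is
# not importable until then.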
| 58
| 86
| 0.849138
| 26
| 232
| 7.423077
| 0.423077
| 0.248705
| 0.357513
| 0.419689
| 0.497409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056034
| 232
| 3
| 87
| 77.333333
| 0.881279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
66131fa66c1183d21a56009eb259eb3983959979
| 34,923
|
py
|
Python
|
pytest/testPyTX.py
|
Manny27nyc/BitcoinArmory
|
1d02a6640d6257ab0c37013e5cd4b99681a5cfc3
|
[
"MIT"
] | 505
|
2016-02-04T15:54:46.000Z
|
2022-03-27T18:43:01.000Z
|
pytest/testPyTX.py
|
jimmysong/BitcoinArmory
|
1c7190176897a2e0f3e4e198ab2f199059bb2402
|
[
"MIT"
] | 528
|
2016-02-06T19:50:12.000Z
|
2022-01-15T10:21:16.000Z
|
pytest/testPyTX.py
|
jimmysong/BitcoinArmory
|
1c7190176897a2e0f3e4e198ab2f199059bb2402
|
[
"MIT"
] | 208
|
2015-01-02T10:31:40.000Z
|
2021-12-14T07:37:36.000Z
|
'''
Created on Aug 4, 2013
@author: Andy
'''
import sys
sys.path.append('..')
import unittest
from pytest.Tiab import TiabTest
# Do not put any other imports before TiabTest ################
from armoryengine.ArmoryUtils import hex_to_binary, binary_to_hex, hex_to_int, \
ONE_BTC
from armoryengine.BinaryUnpacker import BinaryUnpacker
from armoryengine.Block import PyBlock
from armoryengine.PyBtcAddress import PyBtcAddress
from armoryengine.Script import PyScriptProcessor
from armoryengine.Transaction import PyTx, PyTxIn, PyOutPoint, PyTxOut, \
PyCreateAndSignTx, getMultisigScriptInfo, BlockComponent,\
PyCreateAndSignTx_old
# Unserialize and reserialize
tx1raw = hex_to_binary( \
'01000000016290dce984203b6a5032e543e9e272d8bce934c7de4d15fa0fe44d'
'd49ae4ece9010000008b48304502204f2fa458d439f957308bca264689aa175e'
'3b7c5f78a901cb450ebd20936b2c500221008ea3883a5b80128e55c9c6070aa6'
'264e1e0ce3d18b7cd7e85108ce3d18b7419a0141044202550a5a6d3bb81549c4'
'a7803b1ad59cdbba4770439a4923624a8acfc7d34900beb54a24188f7f0a4068'
'9d905d4847cc7d6c8d808a457d833c2d44ef83f76bffffffff0242582c0a0000'
'00001976a914c1b4695d53b6ee57a28647ce63e45665df6762c288ac80d1f008'
'000000001976a9140e0aec36fe2545fb31a41164fb6954adcd96b34288ac00000000')
tx2raw = hex_to_binary( \
'0100000001f658dbc28e703d86ee17c9a2d3b167a8508b082fa0745f55be5144'
'a4369873aa010000008c49304602210041e1186ca9a41fdfe1569d5d807ca7ff'
'6c5ffd19d2ad1be42f7f2a20cdc8f1cc0221003366b5d64fe81e53910e156914'
'091d12646bc0d1d662b7a65ead3ebe4ab8f6c40141048d103d81ac9691cf13f3'
'fc94e44968ef67b27f58b27372c13108552d24a6ee04785838f34624b294afee'
'83749b64478bb8480c20b242c376e77eea2b3dc48b4bffffffff0200e1f50500'
'0000001976a9141b00a2f6899335366f04b277e19d777559c35bc888ac40aeeb'
'02000000001976a9140e0aec36fe2545fb31a41164fb6954adcd96b34288ac00000000')
multiTx1raw = hex_to_binary( \
'0100000004a14fd232f045f0c9f28c6848a22fee393152e901eaa61a9f18438b3ba05c6035010000008a47304402201b19808aa145dbebf775ed11a15d763eaa2'
'b5df92b20f9835f62c72404918b1b02205aea3e816ac6ac7545254b9c34a00c37f20024793bbe0a64958934343f3c577b014104c0f3d0a4920bb6825769dd6ae1'
'e36b0ac36581639d605241cdd548c4ef5d46cda5ac21723d478041a63118f192fdb730c4cf76106789824cd68879a7afeb5288ffffffffa14fd232f045f0c9f28'
'c6848a22fee393152e901eaa61a9f18438b3ba05c6035000000008b4830450220796307d9787b892c8b1ada8511d99e855ea3099e1a76ce0f7aa783ed352a6e59'
'022100fc38d05d7dfbe51e28c36d854dd0dcc938d60a3e406573c3dc39253694d14a12014104630aaf9d5c8d757cb5759428d4075911a2b2ff13dd7208ad7ea1d'
'1682738a7138be93ee526c9d774e0dea03fa2a5fbb68043259ddfb942c0763f9b636b40c43fffffffffa14fd232f045f0c9f28c6848a22fee393152e901eaa61a'
'9f18438b3ba05c6035020000008c493046022100cb423b63197ef3cdbfaed69f61aac59755f0025bd6d7a9d3c78024d897ebcf94022100f3ad14804a3c8042387'
'eca9b9053abe99e12651a795cae7f546b08e1c08c6464014104649694df12dcd7fdb5a8c54c376b904bd7337891d865b8d306beb5d2e5d8fdf2a537d6f9df65ff'
'44eb0b6042ebfdf9e338bff7f4afacb359dd6c71aea7b9b92dffffffffa14fd232f045f0c9f28c6848a22fee393152e901eaa61a9f18438b3ba05c60350300000'
'08b483045022100fb9f4ddc68497a266362d489abf05184909a2b99aa64803061c88597b725877802207f39cf5a90a305aee45f365cf9e2d258e37cab4da6c123'
'aa287635cd1fd40dd001410438252055130f3dd242201684931550c4065efc1b87c48192f75868f747e2a9df9a700fed7e90068bd395c58680bd593780c8119e7'
'981dae08c345588f120fcb4ffffffff02e069f902000000001976a914ad00cf2b893e132c33a79a22ae938d6309c780a488ac80f0fa02000000001976a9143155'
'18b646ea65ad148ee1e2f0360233617447e288ac00000000')
multiTx2raw = hex_to_binary( \
'0100000004a14fd232f045f0c9f28c6848a22fee393152e901eaa61a9f18438b3ba05c6035010000008a47304402201b19808aa145dbebf775ed11a15d763eaa2'
'b5df92b20f9835f62c72404918b1b02205aea3e816ac6ac7545254b9c34a00c37f20024793bbe0a64958934343f3c577b014104c0f3d0a4920bb6825769dd6ae1'
'e36b0ac36581639d605241cdd548c4ef5d46cda5ac21723d478041a63118f192fdb730c4cf76106789824cd68879a7afeb5288ffffffffa14fd232f045f0c9f28'
'c6848a22fee393152e901eaa61a9f18438b3ba05c6035000000008b4830450220796307d9787b892c8b1ada8511d99e855ea3099e1a76ce0f7aa783ed352a6e59'
'022100fc38d05d7dfbe51e28c36d854dd0dcc938d60a3e406573c3dc39253694d14a12014104630aaf9d5c8d757cb5759428d4075911a2b2ff13dd7208ad7ea1d'
'1682738a7138be93ee526c9d774e0dea03fa2a5fbb68043259ddfb942c0763f9b636b40c43fffffffffa14fd232f045f0c9f28c6848a22fee393152e901eaa61a'
'9f18438b3ba05c6035020000008c493046022100cb423b63197ef3cdbfaed69f61aac59755f0025bd6d7a9d3c78024d897ebcf94022100f3ad14804a3c8042387'
'eca9b9053abe99e12651a795cae7f546b08e1c08c6464014104649694df12dcd7fdb5a8c54c376b904bd7337891d865b8d306beb5d2e5d8fdf2a537d6f9df65ff'
'44eb0b6042ebfdf9e338bff7f4afacb359dd6c71aea7b9b92dffffffffa14fd232f045f0c9f28c6848a22fee393152e901eaa61a9f18438b3ba05c60350300000'
'08c49304602220000fb9f4ddc68497a266362d489abf05184909a2b99aa64803061c88597b725877802207f39cf5a90a305aee45f365cf9e2d258e37cab4da6c123'
'aa287635cd1fd40dd001410438252055130f3dd242201684931550c4065efc1b87c48192f75868f747e2a9df9a700fed7e90068bd395c58680bd593780c8119e7'
'981dae08c345588f120fcb4ffffffff02e069f902000000001976a914ad00cf2b893e132c33a79a22ae938d6309c780a488ac80f0fa02000000001976a9143155'
'18b646ea65ad148ee1e2f0360233617447e288ac00000000')
# multiSig2of3 has both bare multi-sig and P2SH
# First input is bare, and 2nd and 3rd are P2SH
multiSig2of3 = hex_to_binary((
'01000000 036bceb0 631853d2 e9d8597c f91b7339 7e1ad838 fa1f1396 275c5ad3'
'32ea0c16 15010000 00920048 30450221 00909e02 1f8d9482 04773a1e e953459f'
'96b42247 7e0f11ba b54a4bb8 d1fadea8 0d02202d 1b79dcbc 8e3a2b36 3cc971ae'
'f7cacb42 3bef200e ebcbb680 fce6c475 f9175801 47304402 20326f53 d77c049b'
'7627fd52 25cf0542 f16e5d84 99714b68 2aa11e9e e389605f 31022007 b9bfac66'
'886efdca eef17581 21646c0d 97fbf7f5 639538e0 06aee09e 3c471101 ffffffff'
'fdba3759 bac4d06b 9a16e669 96a986fc 13108842 ebbe87fc eadf4752 4b4809dd'
'00000000 fd5c0100 47304402 2055c491 84845c1d 92c81ad8 f0085b80 8e00fc58'
'6c8c8332 6177213a 5778a35d 2302200d 2bd241fd 8f8c77db 0b12517d 4edbeaed'
'47dd21d7 ffd46729 4992fe33 fb1c4201 47304402 201ff20a ce41831b b7902f0c'
'a5ecd6cb 3f681f23 47d89cd3 ba2e5cce 1e2e9cc4 f302204d fab13267 729f0290'
'a1b22e39 ce951b91 c3102b82 99dc6bb7 4aed7de5 83045a01 4cc95241 04390ad0'
'36732b60 991854df d75f2a69 f9c66f05 05d031dd 7883be1d 411dea29 a97c1cb3'
'c172344d eea11050 e21d4dd5 647241de f6cdfb30 db27aad5 f63817b7 ac410466'
'f9270b58 4c3e0418 277b8bd6 046b609d 77eac38b 6be4384e 589f3335 1976884b'
'8944b03d 0f6f6bcd 08aba612 4cae1af1 34514e0e 958064ae 9eaef831 055d6441'
'04cfaa15 4390a4fa 244fd064 ec8e61ac 0c3e9ccf 94a4a7f4 d89ac946 b7005080'
'82f5a63b 2f25fdfc 3621c94b ead1c378 2793c53f 0734cc18 08ed3b79 5ce94a40'
'4c53aeff ffffff2c 94040fe5 d781fb35 d779cf0c 88bae45b d1bd03b5 761bae32'
'f8cf4d4c 3c31c400 000000fd 5e010048 30450221 00824183 2fd85b99 4bc54168'
'8db9daf5 fb90244c 5f0eb9d5 4142b092 c8dce878 cc022049 67a455d9 852afaea'
'15c9f788 7f31db72 368a8393 f6b4b34c 9488b765 618c3b01 48304502 2100b050'
'78411f13 42d10495 cb8bc7bd 1ede6748 c327c68f c5e90310 fe11dd7e 2d4c0220'
'410346b8 d40c54ed 237f0864 eeee0eb5 fa259026 6a5f1909 21ec973f 13a5bdb4'
'014cc952 4104390a d036732b 60991854 dfd75f2a 69f9c66f 0505d031 dd7883be'
'1d411dea 29a97c1c b3c17234 4deea110 50e21d4d d5647241 def6cdfb 30db27aa'
'd5f63817 b7ac4104 66f9270b 584c3e04 18277b8b d6046b60 9d77eac3 8b6be438'
'4e589f33 35197688 4b8944b0 3d0f6f6b cd08aba6 124cae1a f134514e 0e958064'
'ae9eaef8 31055d64 4104cfaa 154390a4 fa244fd0 64ec8e61 ac0c3e9c cf94a4a7'
'f4d89ac9 46b70050 8082f5a6 3b2f25fd fc3621c9 4bead1c3 782793c5 3f0734cc'
'1808ed3b 795ce94a 404c53ae ffffffff 01cfb8a2 09000000 00c95241 04390ad0'
'36732b60 991854df d75f2a69 f9c66f05 05d031dd 7883be1d 411dea29 a97c1cb3'
'c172344d eea11050 e21d4dd5 647241de f6cdfb30 db27aad5 f63817b7 ac410466'
'f9270b58 4c3e0418 277b8bd6 046b609d 77eac38b 6be4384e 589f3335 1976884b'
'8944b03d 0f6f6bcd 08aba612 4cae1af1 34514e0e 958064ae 9eaef831 055d6441'
'04cfaa15 4390a4fa 244fd064 ec8e61ac 0c3e9ccf 94a4a7f4 d89ac946 b7005080'
'82f5a63b 2f25fdfc 3621c94b ead1c378 2793c53f 0734cc18 08ed3b79 5ce94a40'
'4c53ae00 000000 ').replace(' ',''))
# has both bare multi-sig and P2SH
# First input is ...
multiSig7of7 = hex_to_binary((
'01000000 02827c86 94a5c3c3 698fee0c 30d8b1e8 7880f47e 4a99e1c5 7a060ffb'
'b09ad4ac ad000000 00fdfd01 00473044 02201a85 dde4134c f8491241 f5c33821'
'6a0c2771 1b519ef3 122429d7 e0016b21 4e960220 711ae401 457d3aa9 e6fa684f'
'e5238cee 54ff7b38 c754a722 2ab32b1d 5a6710d7 01483045 022100ca fad36ad8'
'79cd5c7e 3b3a5864 03e6f30d 8bfb8b3c 60c42c3b c3ee1ec1 41639f02 2074e0b8'
'2df54cf3 dc966351 4ccbc743 52cda16c f6e9181f 5c9bcbae 4b589d88 36014830'
'45022100 bbb047a8 6c75b089 df24b650 1e466db7 7a83cdcd e6a6c29a 7bf7349d'
'6a986ffd 02200806 86406105 0c05e797 e5f46b1a 3e0a28bb 65b86617 af2ea010'
'b58a1e46 63a20147 30440220 5da1823d e450841b 96f44d15 48ec6165 49dbecd1'
'defad45e ea767b88 6665291a 02201e4a 4b5139e3 34200c3f 171ab22c 4fac6e16'
'9011c17e cc473750 4c1d3bdb b09f0147 30440220 32a970ec 0d3fd10a e6e47aa3'
'388817e6 9c4a40e9 ef37d71c 935106d1 bf5a5f96 02201124 237ce7c9 eef01f1c'
'cb6f4c3f 8069b826 a97e999a 5efa150d 2149fe67 fc9f0148 30450221 00d799e6'
'2819bba2 691461a8 a5e0bd55 48df3f97 8091760b 437aec57 863ca5b5 9402202c'
'791d8949 93ee88f1 c3de2363 fa1c6200 005f2e41 85ed1a49 9a7cd174 1aeb8d01'
'48304502 21008f59 02bd3487 1bc920ad e293e08f b57bbdbb bd2127d5 14551866'
'b14befef ff620220 35eafa7e 653bffaf e681af7d b1cb86c0 4096f17f ac2edbce'
'e654e4b5 7b86e86d 01ffffff ff827c86 94a5c3c3 698fee0c 30d8b1e8 7880f47e'
'4a99e1c5 7a060ffb b09ad4ac ad010000 00fdd303 00483045 022100f9 c1bea188'
'7991f50e 78c7c67d 4ca5d6db 69254b94 089ec3dc 848e682d 1eb79502 200e564d'
'bf74024d e2a89439 726d8efe 522dfc01 587b749c 34a8c22e 98943e81 29014830'
'45022100 ebdd6a9a 45ac4be7 0f982a53 c79b9903 68635f0a 9dcb29ec 46845686'
'712e9459 02203c77 968795bb 8c360a5c 616c9695 26a56846 17c44635 b85c458a'
'76155ca7 528b0148 30450221 00eec14d 4d6cb1da 92e43c93 a3a088a1 7799696f'
'f7aa64b7 6e06207b b400b0db a0022068 df74f129 42681229 f5a99c34 f6cbf7bc'
'df10e8ff 3da0432a ead01fc6 523cda01 48304502 2100e7f4 c88fd69b 7a00255b'
'3e6d48bf 2d6249c0 8669fbc5 cfdd395e 76b0e5c1 cfb10220 27665a72 a75d1762'
'7c14589e d6d9f3c7 9b3c1f62 e5ab35cc 18e957b4 fcaa0e43 01483045 022100c6'
'73b28b2c 6f5be2dd c04395e3 aaf3d7ba f148e679 8629603c 5dfd7e01 f5b27d02'
'203db74e ebd0ebd2 dfb30912 316d0f0c 39e54431 0c2948b8 f1534f9b d31ae4fb'
'de014830 45022100 e484209f e5298481 3d6b3c74 ace64bc7 caaedc5e eccc4fb3'
'6026c6af 0851b50e 02207a26 d23168b7 31b8d3e8 ac351e17 370eeb33 69c1f684'
'6200fee0 5786fdb1 bfe50147 30440220 3636c311 c249013f f55d1987 3c70d003'
'eae19ae1 03bc60b6 44173865 2d882c9a 022069a9 c2a30200 d9c62116 ba6e5cb0'
'3a6772fc 01687225 dd87127a 87776de6 a2ef014d d1015741 0434fba0 192f2030'
'5e3d4c62 0efea962 6d0f9a90 9d2890dc 4101e945 89ea4e68 22b67efc eed5fbab'
'c1d994db 8abf9a86 fcc44606 ab76b6a5 d38a9930 0072208d 7e410446 cb30b98f'
'7d162fa6 5f8b34f7 6ebb0e46 4903b64d 93eac48a 021db98d 80a1416e 848af76e'
'0a2c79dd 2fda9616 2314db83 7863d8d8 1a956949 26cd8e58 2ccb8d41 0449ff69'
'21e263ec 2880c9fa 1620f42a 0c2cebf3 bfb78c51 bb462c50 852f0cd3 ab31470a'
'f0dc234a c9167da2 d962a25e fde71bb2 0ef53d6d 446c053f b8458399 d1410450'
'abee229f 06ca4ed9 cbff65e5 4cfdb562 6c4a707e aa5d40cb a181e56d 59ef36d3'
'638c7704 8cb0fbcd 3bf0cf78 39e668df 5401d89a f9075710 9da190c8 f67eec41'
'0452f588 273dda31 649aab7b f825c2e2 706962c2 0c17e738 7b3698de 06f7af09'
'c8d18a76 1162d510 915a8097 e29dcd5f f3d4de9d cac226da f2e3c61b 81b064be'
'82410461 9c4390ca 53825a15 a07ebf6d e2979bc9 c42c4de0 f57f3e83 cd7b5007'
'6a413799 6403ec86 2fd5c1d4 13b63683 36c6a2b6 c88bcb61 beb1009e 3a691572'
'c2799841 04bbee46 8827e700 4a9c535d c699e33e cf01a521 471738fa 2a25c432'
'58d13be5 f0654189 ca5c1a56 880791a6 1039fb65 de1d9056 836a0a7f 139369b2'
'46a42b94 ed57aeff ffffff02 007e5603 00000000 17a914ee 5ae7effc dc259821'
'70b8a822 978338c1 c3b3c987 e0ba3c00 00000000 17a914ee 5ae7effc dc259821'
'70b8a822 978338c1 c3b3c987 00000000 ').replace(' ',''))
# Here's a full block, which we should be able to parse and process
hexBlock = ( \
'01000000eb10c9a996a2340a4d74eaab41421ed8664aa49d18538bab59010000000000005a2f06efa9f2bd804f17877537f2080030cadbfa1eb50e02338117cc'
'604d91b9b7541a4ecfbb0a1a64f1ade70301000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804cfbb0a1a'
'02360affffffff0100f2052a01000000434104c2239c4eedb3beb26785753463be3ec62b82f6acd62efb65f452f8806f2ede0b338e31d1f69b1ce449558d7061'
'aa1648ddc2bf680834d3986624006a272dc21cac000000000100000003e8caa12bcb2e7e86499c9de49c45c5a1c6167ea4b894c8c83aebba1b6100f343010000'
'008c493046022100e2f5af5329d1244807f8347a2c8d9acc55a21a5db769e9274e7e7ba0bb605b26022100c34ca3350df5089f3415d8af82364d7f567a6a297f'
'cc2c1d2034865633238b8c014104129e422ac490ddfcb7b1c405ab9fb42441246c4bca578de4f27b230de08408c64cad03af71ee8a3140b40408a7058a1984a9'
'f246492386113764c1ac132990d1ffffffff5b55c18864e16c08ef9989d31c7a343e34c27c30cd7caa759651b0e08cae0106000000008c4930460221009ec9aa'
'3e0caf7caa321723dea561e232603e00686d4bfadf46c5c7352b07eb00022100a4f18d937d1e2354b2e69e02b18d11620a6a9332d563e9e2bbcb01cee559680a'
'014104411b35dd963028300e36e82ee8cf1b0c8d5bf1fc4273e970469f5cb931ee07759a2de5fef638961726d04bd5eb4e5072330b9b371e479733c942964bb8'
'6e2b22ffffffff3de0c1e913e6271769d8c0172cea2f00d6d3240afc3a20f9fa247ce58af30d2a010000008c493046022100b610e169fd15ac9f60fe2b507529'
'281cf2267673f4690ba428cbb2ba3c3811fd022100ffbe9e3d71b21977a8e97fde4c3ba47b896d08bc09ecb9d086bb59175b5b9f03014104ff07a1833fd8098b'
'25f48c66dcf8fde34cbdbcc0f5f21a8c2005b160406cbf34cc432842c6b37b2590d16b165b36a3efc9908d65fb0e605314c9b278f40f3e1affffffff0240420f'
'00000000001976a914adfa66f57ded1b655eb4ccd96ee07ca62bc1ddfd88ac007d6a7d040000001976a914981a0c9ae61fa8f8c96ae6f8e383d6e07e77133e88'
'ac00000000010000000138e7586e0784280df58bd3dc5e3d350c9036b1ec4107951378f45881799c92a4000000008a47304402207c945ae0bbdaf9dadba07bdf'
'23faa676485a53817af975ddf85a104f764fb93b02201ac6af32ddf597e610b4002e41f2de46664587a379a0161323a85389b4f82dda014104ec8883d3e4f7a3'
'9d75c9f5bb9fd581dc9fb1b7cdf7d6b5a665e4db1fdb09281a74ab138a2dba25248b5be38bf80249601ae688c90c6e0ac8811cdb740fcec31dffffffff022f66'
'ac61050000001976a914964642290c194e3bfab661c1085e47d67786d2d388ac2f77e200000000001976a9141486a7046affd935919a3cb4b50a8a0c233c286c'
'88ac00000000')
# I made these two tx in a fake blockchain... but they should still work
tx1Fake = PyTx().unserialize(hex_to_binary( (
'01000000 0163451d 1002611c 1388d5ba 4ddfdf99 196a86b5 990fb5b0 dc786207'
'4fdcb8ee d2000000 004a4930 46022100 cb02fb5a 910e7554 85e3578e 6e9be315'
'a161540a 73f84ee6 f5d68641 925c59ac 0221007e 530a1826 30b50e2c 12dd09cd'
'ebfd809f 038be982 bdc2c7e9 d4cbf634 9e088d01 ffffffff 0200ca9a 3b000000'
'001976a9 14cb2abd e8bccacc 32e893df 3a054b9e f7f227a4 ce88ac00 286bee00'
'00000019 76a914ee 26c56fc1 d942be8d 7a24b2a1 001dd894 69398088 ac000000'
'00' ).replace(' ','')))
tx2Fake = PyTx().unserialize(hex_to_binary( (
'01000000 01a5b837 da38b64a 6297862c ba8210d0 21ac59e1 2b7c6d7e 70c355f6'
'972ee7a8 6e010000 008c4930 46022100 89e47100 d88d5f8c 8f62a796 dac3afb8'
'f090c6fc 2eb0c4af ac7b7567 3a364c01 0221002b f40e554d ae51264b 0a86df17'
'3e45756a 89bbd302 4f166cc4 2cfd1874 13636901 41046868 0737c76d abb801cb'
'2204f57d be4e4579 e4f710cd 67dc1b42 27592c81 e9b5cf02 b5ac9e8b 4c9f49be'
'5251056b 6a6d011e 4c37f6b6 d17ede6b 55faa235 19e2ffff ffff0100 286bee00'
'00000019 76a914c5 22664fb0 e55cdc5c 0cea73b4 aad97ec8 34323288 ac000000'
'00' ).replace(' ','')))
expectedMultiTxInput1 = hex_to_binary( (
'47304402 20796307 d9787b89 2c8b1ada 8511d99e 855ea309 9e1a76ce 0f7aa783'
'ed352a6e 59022003 c72fa282 041ae1d7 3c927ab2 2f233581 d8d2a86e e32c77e3'
'9939563b 64f72f01 4104630a af9d5c8d 757cb575 9428d407 5911a2b2 ff13dd72'
'08ad7ea1 d1682738 a7138be9 3ee526c9 d774e0de a03fa2a5 fbb68043 259ddfb9'
'42c0763f 9b636b40 c43f').replace(' ',''))
expectedMultiTxInput2 = hex_to_binary( (
'48304502 2100cb42 3b63197e f3cdbfae d69f61aa c59755f0 025bd6d7 a9d3c780'
'24d897eb cf940220 0c52eb7f b5c37fbd c7813564 6fac5415 1c9c77cc 35ebf1bc'
'6b6755ab 0fa9dcdd 01410464 9694df12 dcd7fdb5 a8c54c37 6b904bd7 337891d8'
'65b8d306 beb5d2e5 d8fdf2a5 37d6f9df 65ff44eb 0b6042eb fdf9e338 bff7f4af'
'acb359dd 6c71aea7 b9b92d ').replace(' ',''))
txInput0 = hex_to_binary( (
'47304402 204f2fa4 58d439f9 57308bca 264689aa 175e3b7c 5f78a901 cb450ebd'
'20936b2c 50022071 5c77c5a4 7fed71aa 3639f8f5 59d9b09c a1f91523 cbc8536e'
'c9904fb7 7effa701 41044202 550a5a6d 3bb81549 c4a7803b 1ad59cdb ba477043'
'9a492362 4a8acfc7 d34900be b54a2418 8f7f0a40 689d905d 4847cc7d 6c8d808a'
'457d833c 2d44ef83 f76b').replace(' ',''))
multiSigTx2of3Input0 = hex_to_binary( (
'00483045 02210090 9e021f8d 94820477 3a1ee953 459f96b4 22477e0f 11bab54a'
'4bb8d1fa dea80d02 202d1b79 dcbc8e3a 2b363cc9 71aef7ca cb423bef 200eebcb'
'b680fce6 c475f917 58014730 44022032 6f53d77c 049b7627 fd5225cf 0542f16e'
'5d849971 4b682aa1 1e9ee389 605f3102 2007b9bf ac66886e fdcaeef1 75812164'
'6c0d97fb f7f56395 38e006ae e09e3c47 1101').replace(' ',''))
multiSigTx2of3Input1 = hex_to_binary( (
'00473044 022055c4 9184845c 1d92c81a d8f0085b 808e00fc 586c8c83 32617721'
'3a5778a3 5d230220 0d2bd241 fd8f8c77 db0b1251 7d4edbea ed47dd21 d7ffd467'
'294992fe 33fb1c42 01473044 02201ff2 0ace4183 1bb7902f 0ca5ecd6 cb3f681f'
'2347d89c d3ba2e5c ce1e2e9c c4f30220 4dfab132 67729f02 90a1b22e 39ce951b'
'91c3102b 8299dc6b b74aed7d e583045a 014cc952 4104390a d036732b 60991854'
'dfd75f2a 69f9c66f 0505d031 dd7883be 1d411dea 29a97c1c b3c17234 4deea110'
'50e21d4d d5647241 def6cdfb 30db27aa d5f63817 b7ac4104 66f9270b 584c3e04'
'18277b8b d6046b60 9d77eac3 8b6be438 4e589f33 35197688 4b8944b0 3d0f6f6b'
'cd08aba6 124cae1a f134514e 0e958064 ae9eaef8 31055d64 4104cfaa 154390a4'
'fa244fd0 64ec8e61 ac0c3e9c cf94a4a7 f4d89ac9 46b70050 8082f5a6 3b2f25fd'
'fc3621c9 4bead1c3 782793c5 3f0734cc 1808ed3b 795ce94a 404c53ae').replace(' ',''))
multiSigTx7of7Input0 = hex_to_binary( (
'00473044 02201a85 dde4134c f8491241 f5c33821 6a0c2771 1b519ef3 122429d7'
'e0016b21 4e960220 711ae401 457d3aa9 e6fa684f e5238cee 54ff7b38 c754a722'
'2ab32b1d 5a6710d7 01483045 022100ca fad36ad8 79cd5c7e 3b3a5864 03e6f30d'
'8bfb8b3c 60c42c3b c3ee1ec1 41639f02 2074e0b8 2df54cf3 dc966351 4ccbc743'
'52cda16c f6e9181f 5c9bcbae 4b589d88 36014830 45022100 bbb047a8 6c75b089'
'df24b650 1e466db7 7a83cdcd e6a6c29a 7bf7349d 6a986ffd 02200806 86406105'
'0c05e797 e5f46b1a 3e0a28bb 65b86617 af2ea010 b58a1e46 63a20147 30440220'
'5da1823d e450841b 96f44d15 48ec6165 49dbecd1 defad45e ea767b88 6665291a'
'02201e4a 4b5139e3 34200c3f 171ab22c 4fac6e16 9011c17e cc473750 4c1d3bdb'
'b09f0147 30440220 32a970ec 0d3fd10a e6e47aa3 388817e6 9c4a40e9 ef37d71c'
'935106d1 bf5a5f96 02201124 237ce7c9 eef01f1c cb6f4c3f 8069b826 a97e999a'
'5efa150d 2149fe67 fc9f0148 30450221 00d799e6 2819bba2 691461a8 a5e0bd55'
'48df3f97 8091760b 437aec57 863ca5b5 9402202c 791d8949 93ee88f1 c3de2363'
'fa1c6200 005f2e41 85ed1a49 9a7cd174 1aeb8d01 48304502 21008f59 02bd3487'
'1bc920ad e293e08f b57bbdbb bd2127d5 14551866 b14befef ff620220 35eafa7e'
'653bffaf e681af7d b1cb86c0 4096f17f ac2edbce e654e4b5 7b86e86d 01').replace(' ',''))
multiSigTx7of7Input1 = hex_to_binary( (
'00483045 022100f9 c1bea188 7991f50e 78c7c67d 4ca5d6db 69254b94 089ec3dc'
'848e682d 1eb79502 200e564d bf74024d e2a89439 726d8efe 522dfc01 587b749c'
'34a8c22e 98943e81 29014830 45022100 ebdd6a9a 45ac4be7 0f982a53 c79b9903'
'68635f0a 9dcb29ec 46845686 712e9459 02203c77 968795bb 8c360a5c 616c9695'
'26a56846 17c44635 b85c458a 76155ca7 528b0148 30450221 00eec14d 4d6cb1da'
'92e43c93 a3a088a1 7799696f f7aa64b7 6e06207b b400b0db a0022068 df74f129'
'42681229 f5a99c34 f6cbf7bc df10e8ff 3da0432a ead01fc6 523cda01 48304502'
'2100e7f4 c88fd69b 7a00255b 3e6d48bf 2d6249c0 8669fbc5 cfdd395e 76b0e5c1'
'cfb10220 27665a72 a75d1762 7c14589e d6d9f3c7 9b3c1f62 e5ab35cc 18e957b4'
'fcaa0e43 01483045 022100c6 73b28b2c 6f5be2dd c04395e3 aaf3d7ba f148e679'
'8629603c 5dfd7e01 f5b27d02 203db74e ebd0ebd2 dfb30912 316d0f0c 39e54431'
'0c2948b8 f1534f9b d31ae4fb de014830 45022100 e484209f e5298481 3d6b3c74'
'ace64bc7 caaedc5e eccc4fb3 6026c6af 0851b50e 02207a26 d23168b7 31b8d3e8'
'ac351e17 370eeb33 69c1f684 6200fee0 5786fdb1 bfe50147 30440220 3636c311'
'c249013f f55d1987 3c70d003 eae19ae1 03bc60b6 44173865 2d882c9a 022069a9'
'c2a30200 d9c62116 ba6e5cb0 3a6772fc 01687225 dd87127a 87776de6 a2ef014d'
'd1015741 0434fba0 192f2030 5e3d4c62 0efea962 6d0f9a90 9d2890dc 4101e945'
'89ea4e68 22b67efc eed5fbab c1d994db 8abf9a86 fcc44606 ab76b6a5 d38a9930'
'0072208d 7e410446 cb30b98f 7d162fa6 5f8b34f7 6ebb0e46 4903b64d 93eac48a'
'021db98d 80a1416e 848af76e 0a2c79dd 2fda9616 2314db83 7863d8d8 1a956949'
'26cd8e58 2ccb8d41 0449ff69 21e263ec 2880c9fa 1620f42a 0c2cebf3 bfb78c51'
'bb462c50 852f0cd3 ab31470a f0dc234a c9167da2 d962a25e fde71bb2 0ef53d6d'
'446c053f b8458399 d1410450 abee229f 06ca4ed9 cbff65e5 4cfdb562 6c4a707e'
'aa5d40cb a181e56d 59ef36d3 638c7704 8cb0fbcd 3bf0cf78 39e668df 5401d89a'
'f9075710 9da190c8 f67eec41 0452f588 273dda31 649aab7b f825c2e2 706962c2'
'0c17e738 7b3698de 06f7af09 c8d18a76 1162d510 915a8097 e29dcd5f f3d4de9d'
'cac226da f2e3c61b 81b064be 82410461 9c4390ca 53825a15 a07ebf6d e2979bc9'
'c42c4de0 f57f3e83 cd7b5007 6a413799 6403ec86 2fd5c1d4 13b63683 36c6a2b6'
'c88bcb61 beb1009e 3a691572 c2799841 04bbee46 8827e700 4a9c535d c699e33e'
'cf01a521 471738fa 2a25c432 58d13be5 f0654189 ca5c1a56 880791a6 1039fb65'
'de1d9056 836a0a7f 139369b2 46a42b94 ed57ae ').replace(' ',''))
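# A Bitcoin outpoint is a 32-byte previous-transaction hash plus a 4-byte
# output index, hence 36 zero bytes for the all-zero outpoint used below.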
ALL_ZERO_OUTPOINT = hex_to_binary('00' * 36)
class PyTXTest(TiabTest):
def testSerializeUnserialize(self):
tx1 = PyTx().unserialize(tx1raw)
tx2 = PyTx().unserialize(BinaryUnpacker(tx2raw))
tx1again = tx1.serialize()
tx2again = tx2.serialize()
self.assertEqual(tx1again, tx1raw)
self.assertEqual(tx2again, tx2raw)
blk = PyBlock().unserialize( hex_to_binary(hexBlock) )
blockReHex = binary_to_hex(blk.serialize())
self.assertEqual(hexBlock, blockReHex)
binRoot = blk.blockData.getMerkleRoot()
self.assertEqual(blk.blockHeader.merkleRoot, blk.blockData.merkleRoot)
def testCreateTx(self):
addrA = PyBtcAddress().createFromPrivateKey(hex_to_int('aa' * 32))
addrB = PyBtcAddress().createFromPrivateKey(hex_to_int('bb' * 32))
# This TxIn will be completely ignored, so it can contain garbage
txinA = PyTxIn()
txinA.outpoint = PyOutPoint().unserialize(hex_to_binary('00'*36))
txinA.binScript = hex_to_binary('99'*4)
txinA.intSeq = hex_to_int('ff'*4)
# test binary unpacker in unserialize
testTxIn = PyTxIn().unserialize(txinA.serialize())
self.assertEqual(txinA.getScript(), testTxIn.getScript())
self.assertEqual(txinA.intSeq, testTxIn.intSeq)
self.assertEqual(txinA.outpoint.txHash, testTxIn.outpoint.txHash)
txoutA = PyTxOut()
txoutA.value = 50 * ONE_BTC
txoutA.binScript = '\x76\xa9\x14' + addrA.getAddr160() + '\x88\xac'
# Test pprint
print '\nTest pretty print PyTxIn, expect PrevTXHash all 0s'
testTxIn.pprint()
# test binary unpacker in unserialize
testTxOut = PyTxOut().unserialize(txoutA.serialize())
self.assertEqual(txoutA.getScript(), testTxOut.getScript())
self.assertEqual(txoutA.value, testTxOut.getValue())
# Test pprint
print '\nTest pretty print PyTxOut'
testTxOut.pprint()
tx1 = PyTx()
tx1.version = 1
tx1.numInputs = 1
tx1.inputs = [txinA]
tx1.numOutputs = 1
tx1.outputs = [txoutA]
tx1.locktime = 0
tx1hash = tx1.getHash()
recipientList = tx1.makeRecipientsList()
self.assertEqual(len(recipientList), 1)
self.assertEqual(recipientList[0][0], 0)
self.assertEqual(recipientList[0][1], 50 * ONE_BTC)
self.assertEqual(tx1.getHashHex(), binary_to_hex(tx1hash))
# Creating transaction to send coins from A to B
tx2 = PyCreateAndSignTx_old( [[ addrA, tx1, 0 ]], [[addrB, 50*ONE_BTC]])
psp = PyScriptProcessor()
psp.setTxObjects(tx1, tx2, 0)
self.assertTrue(psp.verifyTransactionValid())
def testVerifyTxFromFakeBlockChain(self):
psp = PyScriptProcessor()
psp.setTxObjects(tx1Fake, tx2Fake, 0)
self.assertTrue(psp.verifyTransactionValid())
def test2of2MultiSigTx(self):
tx1 = PyTx().unserialize(hex_to_binary('010000000189a0022c8291b4328338ec95179612b8ebf72067051de019a6084fb97eae0ebe000000004a4930460221009627882154854e3de066943ba96faba02bb8b80c1670a0a30d0408caa49f03df022100b625414510a2a66ebb43fffa3f4023744695380847ee1073117ec90cb60f2c8301ffffffff0210c18d0000000000434104a701496f10db6aa8acbb6a7aa14d62f4925f8da03de7f0262010025945f6ebcc3efd55b6aa4bc6f811a0dc1bbdd2644bdd81c8a63766aa11f650cd7736bbcaf8ac001bb7000000000043526b006b7dac7ca914fc1243972b59c1726735d3c5cca40e415039dce9879a6c936b7dac7ca914375dd72e03e7b5dbb49f7e843b7bef4a2cc2ce9e879a6c936b6c6ca200000000'))
tx2 = PyTx().unserialize(hex_to_binary('01000000011c9608650a912be7fa88eecec664e6fbfa4b676708697fa99c28b3370005f32d01000000fd1701483045022017462c29efc9158cf26f2070d444bb2b087b8a0e6287a9274fa36fad30c46485022100c6d4cc6cd504f768389637df71c1ccd452e0691348d0f418130c31da8cc2a6e8014104e83c1d4079a1b36417f0544063eadbc44833a992b9667ab29b4ff252d8287687bad7581581ae385854d4e5f1fcedce7de12b1aec1cb004cabb2ec1f3de9b2e60493046022100fdc7beb27de0c3a53fbf96df7ccf9518c5fe7873eeed413ce17e4c0e8bf9c06e022100cc15103b3c2e1f49d066897fe681a12e397e87ed7ee39f1c8c4a5fef30f4c2c60141047cf315904fcc2e3e2465153d39019e0d66a8aaec1cec1178feb10d46537427239fd64b81e41651e89b89fefe6a23561d25dddc835395dd3542f83b32a1906aebffffffff01c0d8a700000000001976a914fc1243972b59c1726735d3c5cca40e415039dce988ac00000000'))
# Verify 2-of-2 tx from Testnet
psp = PyScriptProcessor()
psp.setTxObjects(tx1, tx2, 0)
self.assertTrue(psp.verifyTransactionValid())
def test2of3MultiSigTx(self):
tx1 = PyTx().unserialize(hex_to_binary('010000000371c06e0639dbe6bc35e6f948da4874ae69d9d91934ec7c5366292d0cbd5f97b0010000008a47304402200117cdd3ec6259af29acea44db354a6f57ac10d8496782033f5fe0febfd77f1b02202ceb02d60dbb43e6d4e03e5b5fbadc031f8bbb3c6c34ad307939947987f600bf01410452d63c092209529ca2c75e056e947bc95f9daffb371e601b46d24377aaa3d004ab3c6be2d6d262b34d736b95f3b0ef6876826c93c4077d619c02ebd974c7facdffffffffa65aa866aa7743ec05ba61418015fc32ecabd99886732056f1d4454c8f762bf8000000008c493046022100ea0a9b41c9372837e52898205c7bebf86b28936a3ee725672d0ca8f434f876f0022100beb7243a51fbc0997e55cb519d3b9cbd59f7aba68d80ba1e8adbb53443cda3c00141043efd1ca3cffc50638031281d227ff347a3a27bc145e2f846891d29f87bc068c27710559c4d9cd71f7e9e763d6e2753172406eb1ed1fadcaf9a8972b4270f05b4ffffffffd866d14151ee1b733a2a7273f155ecb25c18303c31b2c4de5aa6080aef2e0006000000008b483045022052210f95f6b413c74ce12cfc1b14a36cb267f9fa3919fa6e20dade1cd570439f022100b9e5b325f312904804f043d06c6ebc8e4b1c6cd272856c48ab1736b9d562e10c01410423fdddfe7e4d70d762dd6596771e035f4b43d54d28c2231be1102056f81f067914fe4fb6fd6e3381228ee5587ddd2028c846025741e963d9b1d6cf2c2dea0dbcffffffff0210ef3200000000004341048a33e9fd2de28137574cc69fe5620199abe37b7d08a51c528876fe6c5fa7fc28535f5a667244445e79fffc9df85ec3d79d77693b1f37af0e2d7c1fa2e7113a48acc0d454070000000061526b006b7dac7ca9143cd1def404e12a85ead2b4d3f5f9f817fb0d46ef879a6c936b7dac7ca9146a4e7d5f798e90e84db9244d4805459f87275943879a6c936b7dac7ca914486efdd300987a054510b4ce1148d4ad290d911e879a6c936b6c6ca200000000'))
tx2 = PyTx().unserialize(hex_to_binary('01000000012f654d4d1d7246d1a824c5b6c5177c0b5a1983864579aabb88cabd5d05e032e201000000fda0014730440220151ad44e7f78f9e0c4a3f2135c19ca3de8dbbb7c58893db096c0c5f1573d5dec02200724a78c3fa5f153103cb46816df46eb6cfac3718038607ddec344310066161e01410459fd82189b81772258a3fc723fdda900eb8193057d4a573ee5ad39e26b58b5c12c4a51b0edd01769f96ed1998221daf0df89634a7137a8fa312d5ccc95ed8925483045022100ca34834ece5925cff6c3d63e2bda6b0ce0685b18f481c32e70de9a971e85f12f0220572d0b5de0cf7b8d4e28f4914a955e301faaaa42f05feaa1cc63b45f938d75d9014104ce6242d72ee67e867e6f8ec434b95fcb1889c5b485ec3414df407e11194a7ce012eda021b68f1dd124598a9b677d6e7d7c95b1b7347f5c5a08efa628ef0204e1483045022074e01e8225e8c4f9d0b3f86908d42a61e611f406e13817d16240f94f52f49359022100f4c768dd89c6435afd3834ae2c882465ade92d7e1cc5c2c2c3d8d25c41b3ea61014104ce66c9f5068b715b62cc1622572cd98a08812d8ca01563045263c3e7af6b997e603e8e62041c4eb82dfd386a3412c34c334c34eb3c76fb0e37483fc72323f807ffffffff01b0ad5407000000001976a9146a4e7d5f798e90e84db9244d4805459f8727594388ac00000000'))
# Verify 2-of-3 tx from Testnet
psp = PyScriptProcessor()
psp.setTxObjects(tx1, tx2, 0)
self.assertTrue(psp.verifyTransactionValid())
def testMultiSig(self):
tx1 = PyTx().unserialize(hex_to_binary('0100000001845ad165bdc0f9b5829cf5a594c4148dfd89e24756303f3a8dabeb597afa589b010000008b483045022063c233df8efa3d1885e069e375a8eabf16b23475ef21bdc9628a513ee4caceb702210090a102c7b602043e72b34a154d495ac19b3b9e42acb962c399451f2baead8f4c014104b38f79037ad25b84a564eaf53ede93dec70b35216e6682aa71a47cefa2996ec49acfbb0a8730577c62ef9a7cc20c740aaaaee75419bef9640a4216c2b49c42d3ffffffff02000c022900000000434104c08c0a71ccbe838403e3870aa1ab871b0ab3a6014b0ba41f6df2b9aefea73134ecaa0b27797620e402a33799e9047f86519d9e43bbd504cf753c293752933f4fac406f40010000000062537a7652a269537a829178a91480677c5392220db736455533477d0bc2fba65502879b69537a829178a91402d7aa2e76d9066fb2b3c41ff8839a5c81bdca19879b69537a829178a91410039ce4fdb5d4ee56148fe3935b9bfbbe4ecc89879b6953ae00000000'))
tx2 = PyTx().unserialize(hex_to_binary('0100000001bb664ff716b9dfc831bcc666c1767f362ad467fcfbaf4961de92e45547daab8701000000fd190100493046022100d73f633f114e0e0b324d87d38d34f22966a03b072803afa99c9408201f6d6dc6022100900e85be52ad2278d24e7edbb7269367f5f2d6f1bd338d017ca460008776614401473044022071fef8ac0aa6318817dbd242bf51fb5b75be312aa31ecb44a0afe7b49fcf840302204c223179a383bb6fcb80312ac66e473345065f7d9136f9662d867acf96c12a42015241048c006ff0d2cfde86455086af5a25b88c2b81858aab67f6a3132c885a2cb9ec38e700576fd46c7d72d7d22555eee3a14e2876c643cd70b1b0a77fbf46e62331ac4104b68ef7d8f24d45e1771101e269c0aacf8d3ed7ebe12b65521712bba768ef53e1e84fff3afbee360acea0d1f461c013557f71d426ac17a293c5eebf06e468253e00ffffffff0280969800000000001976a9140817482d2e97e4be877efe59f4bae108564549f188ac7015a7000000000062537a7652a269537a829178a91480677c5392220db736455533477d0bc2fba65502879b69537a829178a91402d7aa2e76d9066fb2b3c41ff8839a5c81bdca19879b69537a829178a91410039ce4fdb5d4ee56148fe3935b9bfbbe4ecc89879b6953ae00000000'))
# OP_CHECKMULTISIG from Testnet
psp = PyScriptProcessor()
psp.setTxObjects(tx1, tx2, 0)
self.assertTrue(psp.verifyTransactionValid())
'''
def testMultiSigAddrExtraction(self):
script1 = hex_to_binary('4104b54b5fc1917945fff64785d4baaca66a9704e9ed26002f51f53763499643321fbc047683a62be16e114e25404ce6ffdcf625a928002403402bf9f01e5cbd5f3dad4104f576e534f9bbf6d7c5f186ff4c6e0c5442c2755314bdee62fbc656f94d6cbf32c5eb3522da21cf9f954133000ffccb20dbfec030737640cc3315ce09619210d0ac')
expectedBtcAddrList1 = ['1KmV9FdKJEFFCHydZUZGdBL9uKq2T9JUm8','13maaQeK5qSPjHwnHhwNUtNKruK3qYLwvv']
self.verifyMultiSigAddrExtraction(script1, expectedBtcAddrList1)
script2 = hex_to_binary('537a7652a269537a829178a91480677c5392220db736455533477d0bc2fba65502879b69537a829178a91402d7aa2e76d9066fb2b3c41ff8839a5c81bdca19879b69537a829178a91410039ce4fdb5d4ee56148fe3935b9bfbbe4ecc89879b6953ae')
expectedBtcAddrList2 = ['1ChwTs5Dmh6y9iDh4pjWyu2X6nAhjre7SV','1G2i31fxRqaoXBfYMuE4YKb9x96uYcHeQ','12Tg96ZPSYc3P2g5c9c4znFFH2whriN9NQ']
self.verifyMultiSigAddrExtraction(script2, expectedBtcAddrList2)
script3 = hex_to_binary('527a7651a269527a829178a914731cdb75c88a01cbb96729888f726b3b9f29277a879b69527a829178a914e9b4261c6122f8957683636548923acc069e8141879b6952ae')
expectedBtcAddrList3 = ['1BVfH6iKT1s8fYEVSj39QkJrPqCKN4hv2m','1NJiFfFPZ177Pv96Yt4FCNZFEumyL2eKmt']
self.verifyMultiSigAddrExtraction(script3, expectedBtcAddrList3)
'''
def verifyMultiSigAddrExtraction(self, scr, expectedBtcAddrList):
addrList = getMultisigScriptInfo(scr)[2]
btcAddrList = []
for a in addrList:
btcAddrList.append(PyBtcAddress().createFromPublicKeyHash160(a).getAddrStr())
self.assertEqual(btcAddrList, expectedBtcAddrList)
def testUnpackUnserializePyOutPoint(self):
outpoint = PyOutPoint().unserialize(BinaryUnpacker(ALL_ZERO_OUTPOINT))
self.assertEqual(outpoint.txHash, hex_to_binary('00'*32))
self.assertEqual(outpoint.txOutIndex, 0)
def testCopyPyOutPoint(self):
outpoint = PyOutPoint().unserialize(BinaryUnpacker(ALL_ZERO_OUTPOINT))
outpointCopy = outpoint.copy()
self.assertEqual(outpoint.txHash, outpointCopy.txHash)
self.assertEqual(outpoint.txOutIndex, outpointCopy.txOutIndex)
def testPPrintPyOutPoint(self):
# No return value - Should just print 0s
outpoint = PyOutPoint().unserialize(BinaryUnpacker(ALL_ZERO_OUTPOINT))
print "PyOutPoint PPrint Test. Expect all 0s: "
outpoint.pprint()
'''
Does not pass because fromCpp is missing
def testCreateCppFromCppPyOutPoint(self):
outpoint = PyOutPoint().unserialize(BinaryUnpacker(ALL_ZERO_OUTPOINT))
outpointFromCpp = PyOutPoint().fromCpp(outpoint.createCpp())
self.assertEqual(outpoint.txHash, outpointFromCpp.txHash)
self.assertEqual(outpoint.txOutIndex, outpointFromCpp.txOutIndex)
'''
def testBogusBlockComponent(self):
class TestBlockComponent(BlockComponent):
pass
testBlkComp = TestBlockComponent()
self.assertRaises(NotImplementedError, testBlkComp.serialize)
self.assertRaises(NotImplementedError, testBlkComp.unserialize)
# TODO: Add some tests for the OP_CHECKMULTISIG support in TxDP
# Running tests with "python <module name>" will NOT work for any Armory tests
# You must run tests with "python -m unittest <module name>" or run all tests with "python -m unittest discover"
# if __name__ == "__main__":
# unittest.main()
| 75.754881
| 1,513
| 0.847064
| 2,337
| 34,923
| 12.615319
| 0.525888
| 0.005766
| 0.011193
| 0.007462
| 0.387491
| 0.377383
| 0.367004
| 0.35408
| 0.347772
| 0.347772
| 0
| 0.513413
| 0.111932
| 34,923
| 460
| 1,514
| 75.919565
| 0.43719
| 0.028205
| 0
| 0.148248
| 0
| 0
| 0.764898
| 0.368944
| 0
| 1
| 0
| 0.002174
| 0.067385
| 0
| null | null | 0.002695
| 0.024259
| null | null | 0.016173
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b0c73abe7d6c4c20a371f847f449694a60b9f43c
| 137
|
py
|
Python
|
Lesson17_FunctionArguments/record_library.py
|
StyvenSoft/degree-python
|
644953608948f341f5a20ceb9a02976a128b472b
|
[
"MIT"
] | null | null | null |
Lesson17_FunctionArguments/record_library.py
|
StyvenSoft/degree-python
|
644953608948f341f5a20ceb9a02976a128b472b
|
[
"MIT"
] | null | null | null |
Lesson17_FunctionArguments/record_library.py
|
StyvenSoft/degree-python
|
644953608948f341f5a20ceb9a02976a128b472b
|
[
"MIT"
] | null | null | null |
def place_record(album):
return
def rotate_record(album):
return
def drop_needle(album):
print("Playing album {}".format(album))
| 15.222222
| 41
| 0.729927
| 19
| 137
| 5.105263
| 0.578947
| 0.226804
| 0.350515
| 0.412371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138686
| 137
| 8
| 42
| 17.125
| 0.822034
| 0
| 0
| 0.333333
| 0
| 0
| 0.116788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.333333
| 0.833333
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
b0ccc33c2e84e0017efc6e320749123bbc92dc31
| 13,441
|
py
|
Python
|
networks/network_dynamics.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
networks/network_dynamics.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
networks/network_dynamics.py
|
enflujo/COVID_schools_dashboard
|
702c9c3c91938e514e56f4cf6f325ed954d7bc3e
|
[
"Apache-2.0"
] | null | null | null |
import jax.numpy as np
import numpy as np2
from jax import random
# Intervention functions
def morning_set_intervention(Graphs_matrix, intervention_eff, hh_occupation=0.9):
# load networks
matrix_household = Graphs_matrix[0]
# matrix_household[2] = [val.values[0] for val in matrix_household[2]]
hh_row = np.asarray(np2.asarray(matrix_household[0]))
hh_col = np.asarray(np2.asarray(matrix_household[1]))
hh_data = np.asarray(np2.asarray(matrix_household[2]))
matrix_preschool = Graphs_matrix[1]
# matrix_preschool[2] = [val.values[0] for val in matrix_preschool[2]]
preschl_row = np.asarray(np2.asarray(matrix_preschool[0]))
preschl_col = np.asarray(np2.asarray(matrix_preschool[1]))
preschl_data = np.asarray(np2.asarray(matrix_preschool[2]))
matrix_primary = Graphs_matrix[2]
# matrix_primary[2] = [val.values[0] for val in matrix_primary[2]]
primary_row = np.asarray(np2.asarray(matrix_primary[0]))
primary_col = np.asarray(np2.asarray(matrix_primary[1]))
primary_data = np.asarray(np2.asarray(matrix_primary[2]))
matrix_highschool = Graphs_matrix[3]
# matrix_highschool[2] = [val.values[0] for val in matrix_highschool[2]]
highschl_row = np.asarray(np2.asarray(matrix_highschool[0]))
highschl_col = np.asarray(np2.asarray(matrix_highschool[1]))
highschl_data = np.asarray(np2.asarray(matrix_highschool[2]))
matrix_work = Graphs_matrix[4]
# matrix_work[2] = [val.values[0] for val in matrix_work[2]]
work_row = np.asarray(np2.asarray(matrix_work[0]))
work_col = np.asarray(np2.asarray(matrix_work[1]))
work_data = np.asarray(np2.asarray(matrix_work[2]))
matrix_community = Graphs_matrix[5]
# matrix_community[2] = [val.values[0] for val in matrix_community[2]]
comm_row = np.asarray(np2.asarray(matrix_community[0]))
comm_col = np.asarray(np2.asarray(matrix_community[1]))
comm_data = np.asarray(np2.asarray(matrix_community[2]))
# turn off school and work layers
preschl_data_set = 0 * preschl_data
primary_data_set = 0 * primary_data
highschl_data_set = 0 * highschl_data
work_data_set = 0 * work_data
# turn on portions of households and community
hh_occupation_intervention = hh_occupation * (1 - intervention_eff)
comm_occupation = 1 - hh_occupation
comm_occupation_intervention = comm_occupation * (1 - intervention_eff)
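    # Note: every mask in this module is drawn with random.PRNGKey(0); JAX's
    # Bernoulli sampler is a pure function of (key, p, shape), so the same mask
    # is produced on every run. np.repeat(..., 2) presumably mirrors each draw
    # because each undirected edge is stored as two directed entries.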
length = int(hh_data.shape[0] / 2)
hh_data_select = np.repeat(random.bernoulli(random.PRNGKey(0), p=(hh_occupation_intervention), shape=(length,)), 2)
hh_data_set = hh_data_select.reshape(hh_data_select.shape[0], 1) * hh_data
length = int(comm_data.shape[0] / 2)
comm_data_select = np.repeat(
random.bernoulli(random.PRNGKey(0), p=(comm_occupation_intervention), shape=(length,)), 2
)
comm_data_set = comm_data_select.reshape(comm_data_select.shape[0], 1) * comm_data
    # create connections
args_ps = (hh_data_set, preschl_data_set, primary_data_set, highschl_data_set, work_data_set, comm_data_set)
ps = np.concatenate(args_ps)
ps = ps.reshape(
ps.shape[0],
)
args_rows = (hh_row, preschl_row, primary_row, highschl_row, work_row, comm_row)
rows = np.concatenate(args_rows)
args_cols = (hh_col, preschl_col, primary_col, highschl_col, work_col, comm_col)
cols = np.concatenate(args_cols)
w = [rows.astype(np.int32), cols.astype(np.int32), ps]
return w
def day_set_intervention(
Graphs_matrix, intervention_eff, schl_occupation, work_occupation, schl_altern=False, hh_occupation=0.3
):
# load networks
matrix_household = Graphs_matrix[0]
# matrix_household[2] = [val.values[0] for val in matrix_household[2]]
hh_row = np.asarray(np2.asarray(matrix_household[0]))
hh_col = np.asarray(np2.asarray(matrix_household[1]))
hh_data = np.asarray(np2.asarray(matrix_household[2]))
matrix_preschool = Graphs_matrix[1]
# matrix_preschool[2] = [val.values[0] for val in matrix_preschool[2]]
preschl_row = np.asarray(np2.asarray(matrix_preschool[0]))
preschl_col = np.asarray(np2.asarray(matrix_preschool[1]))
preschl_data = np.asarray(np2.asarray(matrix_preschool[2]))
matrix_primary = Graphs_matrix[2]
# matrix_primary[2] = [val.values[0] for val in matrix_primary[2]]
primary_row = np.asarray(np2.asarray(matrix_primary[0]))
primary_col = np.asarray(np2.asarray(matrix_primary[1]))
primary_data = np.asarray(np2.asarray(matrix_primary[2]))
matrix_highschool = Graphs_matrix[3]
# matrix_highschool[2] = [val.values[0] for val in matrix_highschool[2]]
highschl_row = np.asarray(np2.asarray(matrix_highschool[0]))
highschl_col = np.asarray(np2.asarray(matrix_highschool[1]))
highschl_data = np.asarray(np2.asarray(matrix_highschool[2]))
matrix_work = Graphs_matrix[4]
# matrix_work[2] = [val.values[0] for val in matrix_work[2]]
work_row = np.asarray(np2.asarray(matrix_work[0]))
work_col = np.asarray(np2.asarray(matrix_work[1]))
work_data = np.asarray(np2.asarray(matrix_work[2]))
matrix_community = Graphs_matrix[5]
# matrix_community[2] = [val.values[0] for val in matrix_community[2]]
comm_row = np.asarray(np2.asarray(matrix_community[0]))
comm_col = np.asarray(np2.asarray(matrix_community[1]))
comm_data = np.asarray(np2.asarray(matrix_community[2]))
# turn off portions of households and community
hh_occupation_intervention = hh_occupation * (1 - intervention_eff)
comm_occupation = 1 - hh_occupation
comm_occupation_intervention = comm_occupation * (1 - intervention_eff)
length = int(hh_data.shape[0] / 2)
hh_data_select = np.repeat(random.bernoulli(random.PRNGKey(0), p=(hh_occupation_intervention), shape=(length,)), 2)
hh_data_set = hh_data_select.reshape(hh_data_select.shape[0], 1) * hh_data
length = int(comm_data.shape[0] / 2)
comm_data_select = np.repeat(
random.bernoulli(random.PRNGKey(0), p=(comm_occupation_intervention), shape=(length,)), 2
)
comm_data_set = comm_data_select.reshape(comm_data_select.shape[0], 1) * comm_data
# turn off portions of school and work layers
if schl_occupation == 0:
preschl_data_set = 0 * preschl_data
primary_data_set = 0 * primary_data
highschl_data_set = 0 * highschl_data
elif schl_occupation == 1.0:
preschl_data_set = preschl_data
primary_data_set = primary_data
highschl_data_set = highschl_data
else:
length = int(preschl_data.shape[0] / 2)
preschl_data_select = np.repeat(random.bernoulli(random.PRNGKey(0), p=(schl_occupation), shape=(length,)), 2)
preschl_data_set = preschl_data_select.reshape(preschl_data_select.shape[0], 1) * preschl_data
length = int(primary_data.shape[0] / 2)
primary_data_select = np.repeat(random.bernoulli(random.PRNGKey(0), p=(schl_occupation), shape=(length,)), 2)
primary_data_set = primary_data_select.reshape(primary_data_select.shape[0], 1) * primary_data
length = int(highschl_data.shape[0] / 2)
highschl_data_select = np.repeat(random.bernoulli(random.PRNGKey(0), p=(schl_occupation), shape=(length,)), 2)
highschl_data_set = highschl_data_select.reshape(highschl_data_select.shape[0], 1) * highschl_data
    # work_occupation_intervention = 1 - intervention_eff
length = int(work_data.shape[0] / 2)
work_data_select = np.repeat(random.bernoulli(random.PRNGKey(0), p=(work_occupation), shape=(length,)), 2)
work_data_set = work_data_select.reshape(work_data_select.shape[0], 1) * work_data
if work_occupation == 0:
work_data_set = 0 * work_data # if work offices are fully closed
    # create connections
args_ps = (hh_data_set, preschl_data_set, primary_data_set, highschl_data_set, work_data_set, comm_data_set)
ps = np.concatenate(args_ps)
ps = ps.reshape(
ps.shape[0],
)
args_rows = (hh_row, preschl_row, primary_row, highschl_row, work_row, comm_row)
rows = np.concatenate(args_rows)
args_cols = (hh_col, preschl_col, primary_col, highschl_col, work_col, comm_col)
cols = np.concatenate(args_cols)
w = [rows.astype(np.int32), cols.astype(np.int32), ps]
return w
def night_set_intervention(Graphs_matrix, intervention_eff, hh_occupation=0.7):
# load networks
matrix_household = Graphs_matrix[0]
# matrix_household[2] = [val.values[0] for val in matrix_household[2]]
hh_row = np.asarray(np2.asarray(matrix_household[0]))
hh_col = np.asarray(np2.asarray(matrix_household[1]))
hh_data = np.asarray(np2.asarray(matrix_household[2]))
matrix_preschool = Graphs_matrix[1]
# matrix_preschool[2] = [val.values[0] for val in matrix_preschool[2]]
preschl_row = np.asarray(np2.asarray(matrix_preschool[0]))
preschl_col = np.asarray(np2.asarray(matrix_preschool[1]))
preschl_data = np.asarray(np2.asarray(matrix_preschool[2]))
matrix_primary = Graphs_matrix[2]
# matrix_primary[2] = [val.values[0] for val in matrix_primary[2]]
primary_row = np.asarray(np2.asarray(matrix_primary[0]))
primary_col = np.asarray(np2.asarray(matrix_primary[1]))
primary_data = np.asarray(np2.asarray(matrix_primary[2]))
matrix_highschool = Graphs_matrix[3]
# matrix_highschool[2] = [val.values[0] for val in matrix_highschool[2]]
highschl_row = np.asarray(np2.asarray(matrix_highschool[0]))
highschl_col = np.asarray(np2.asarray(matrix_highschool[1]))
highschl_data = np.asarray(np2.asarray(matrix_highschool[2]))
matrix_work = Graphs_matrix[4]
# matrix_work[2] = [val.values[0] for val in matrix_work[2]]
work_row = np.asarray(np2.asarray(matrix_work[0]))
work_col = np.asarray(np2.asarray(matrix_work[1]))
work_data = np.asarray(np2.asarray(matrix_work[2]))
matrix_community = Graphs_matrix[5]
# matrix_community[2] = [val.values[0] for val in matrix_community[2]]
comm_row = np.asarray(np2.asarray(matrix_community[0]))
comm_col = np.asarray(np2.asarray(matrix_community[1]))
comm_data = np.asarray(np2.asarray(matrix_community[2]))
# turn off school and work layers
preschl_data_set = 0 * preschl_data
primary_data_set = 0 * primary_data
highschl_data_set = 0 * highschl_data
work_data_set = 0 * work_data
# turn on portions of households and community
hh_occupation_intervention = hh_occupation * (1 - intervention_eff)
comm_occupation = 1 - hh_occupation
comm_occupation_intervention = comm_occupation * (1 - intervention_eff)
length = int(hh_data.shape[0] / 2)
hh_data_select = np.repeat(random.bernoulli(random.PRNGKey(0), p=(hh_occupation_intervention), shape=(length,)), 2)
hh_data_set = hh_data_select.reshape(hh_data_select.shape[0], 1) * hh_data
length = int(comm_data.shape[0] / 2)
comm_data_select = np.repeat(
random.bernoulli(random.PRNGKey(0), p=(comm_occupation_intervention), shape=(length,)), 2
)
comm_data_set = comm_data_select.reshape(comm_data_select.shape[0], 1) * comm_data
    # create connections
args_ps = (hh_data_set, preschl_data_set, primary_data_set, highschl_data_set, work_data_set, comm_data_set)
ps = np.concatenate(args_ps)
ps = ps.reshape(
ps.shape[0],
)
args_rows = (hh_row, preschl_row, primary_row, highschl_row, work_row, comm_row)
rows = np.concatenate(args_rows)
args_cols = (hh_col, preschl_col, primary_col, highschl_col, work_col, comm_col)
cols = np.concatenate(args_cols)
w = [rows.astype(np.int32), cols.astype(np.int32), ps]
return w
def create_day_intervention_dynamics(
Graphs_matrix, Tmax, total_steps, schools_day_open, interv_glob, schl_occupation, work_occupation
):
"""
    A day is divided into 3 partitions, each consisting of a set of hours over the day:
partition[0] -> morning: only a % of households and community is activated
partition[1] -> evening: only work and school layers are activated
partition[2] -> night: only a % of households and community is activated
delta_t -> steps over a day
"""
# Hours distribution in a day
partitions = [8, 8, 8]
steps_per_days = int(total_steps / Tmax)
m_day = int(steps_per_days * (partitions[0] / 24))
e_day = int(steps_per_days * (partitions[1] / 24))
n_day = int(steps_per_days * (partitions[2] / 24))
days_intervals = [m_day, e_day, n_day]
m_w_interv = morning_set_intervention(Graphs_matrix, interv_glob)
e_w_interv_schl_close = day_set_intervention(
Graphs_matrix, interv_glob, schl_occupation=0, work_occupation=work_occupation
)
e_w_interv_schl_open = day_set_intervention(
Graphs_matrix, interv_glob, schl_occupation=schl_occupation, work_occupation=work_occupation
)
n_w_interv = night_set_intervention(Graphs_matrix, interv_glob)
w_interv_intervals_schl_close = [m_w_interv, e_w_interv_schl_close, n_w_interv]
w_interv_intervals_schl_open = [m_w_interv, e_w_interv_schl_open, n_w_interv]
sim_intervals = [] # iterations per network set w
sim_ws = [] # networks per iteration
for d in range(Tmax):
if d < schools_day_open:
sim_intervals.extend(days_intervals)
sim_ws.extend(w_interv_intervals_schl_close)
else:
sim_intervals.extend(days_intervals)
sim_ws.extend(w_interv_intervals_schl_open)
return sim_intervals, sim_ws
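# A minimal numeric sketch (illustration only, not part of the original module)
# of the day-partition arithmetic above: with Tmax = 2 days and total_steps = 48,
# each day has 24 steps, and each 8-hour morning/evening/night partition maps to
# 8 simulation steps.
def _partition_arithmetic_sketch():
    Tmax, total_steps = 2, 48
    partitions = [8, 8, 8]
    steps_per_days = int(total_steps / Tmax)  # 24 steps per day
    days_intervals = [int(steps_per_days * (p / 24)) for p in partitions]
    assert days_intervals == [8, 8, 8]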
| 44.068852
| 119
| 0.720036
| 1,977
| 13,441
| 4.596358
| 0.064745
| 0.053483
| 0.071311
| 0.112909
| 0.871905
| 0.830747
| 0.806537
| 0.802135
| 0.793331
| 0.769341
| 0
| 0.02723
| 0.166654
| 13,441
| 304
| 120
| 44.213816
| 0.784037
| 0.152816
| 0
| 0.728155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019417
| false
| 0
| 0.014563
| 0
| 0.053398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b0f1157d9b25ddd968a718280f245c843c89f5ba
| 14,026
|
py
|
Python
|
tests/grammar/factory/control/test_group.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 37
|
2015-04-21T15:33:53.000Z
|
2022-02-07T00:02:29.000Z
|
tests/grammar/factory/control/test_group.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 86
|
2015-02-01T22:26:02.000Z
|
2021-07-09T08:49:36.000Z
|
tests/grammar/factory/control/test_group.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 27
|
2015-01-26T16:01:09.000Z
|
2021-11-08T23:53:55.000Z
|
# -*- coding: utf-8 -*-
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR Administrator Information grammar tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestGroupInformationGrammar(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('group_info')
def test_agreement_min(self):
header = 'GRHAGR0000102.100130400001 '
trailer = 'GRT012340123456701234567 '
record = header + '\n' + _agreement_short() + '\n' + trailer
result = self.grammar.parseString(record)[0]
self.assertEqual('GRH', result.group_header.record_type)
self.assertEqual('GRT', result.group_trailer.record_type)
transaction = result.transactions[0]
self.assertEqual('AGR', transaction[0].record_type)
def test_agreement_short(self):
header = 'GRHAGR0000102.100130400001 '
trailer = 'GRT012340123456701234567'
record = header + '\n' + _agreement_short() + '\n' + trailer
result = self.grammar.parseString(record)[0]
self.assertEqual('GRH', result.group_header.record_type)
self.assertEqual('GRT', result.group_trailer.record_type)
transaction = result.transactions[0]
self.assertEqual('AGR', transaction[0].record_type)
def test_agreement_short_b(self):
header = 'GRHAGR0123402.100123456789 '
trailer = 'GRT012340123456701234567'
agreement_record_1 = _agreement_record_big()
agreement_record_2 = _agreement_record_big()
record = header + '\n' + agreement_record_1 + '\n' + agreement_record_2 + '\n' + trailer
result = self.grammar.parseString(record)[0]
self.assertEqual('GRH', result.group_header.record_type)
self.assertEqual('GRT', result.group_trailer.record_type)
transactions = result.transactions
self.assertEqual(2, len(transactions))
transaction = transactions[0]
self.assertEqual(21, len(transaction))
self.assertEqual('AGR', transaction[0].record_type)
self.assertEqual('TER', transaction[1].record_type)
self.assertEqual('TER', transaction[2].record_type)
self.assertEqual('IPA', transaction[3].record_type)
self.assertEqual('NPA', transaction[4].record_type)
self.assertEqual('IPA', transaction[5].record_type)
self.assertEqual('NPA', transaction[6].record_type)
self.assertEqual('IPA', transaction[7].record_type)
self.assertEqual('NPA', transaction[8].record_type)
self.assertEqual('IPA', transaction[9].record_type)
self.assertEqual('NPA', transaction[10].record_type)
self.assertEqual('TER', transaction[11].record_type)
self.assertEqual('TER', transaction[12].record_type)
self.assertEqual('IPA', transaction[13].record_type)
self.assertEqual('NPA', transaction[14].record_type)
self.assertEqual('IPA', transaction[15].record_type)
self.assertEqual('NPA', transaction[16].record_type)
self.assertEqual('IPA', transaction[17].record_type)
self.assertEqual('NPA', transaction[18].record_type)
self.assertEqual('IPA', transaction[19].record_type)
def test_agreement_small_pair(self):
header = 'GRHAGR0000102.100130400001 '
trailer = 'GRT000010000017900000719 0000000000'
record = header + '\n' + _agreement_short() + '\n' + _agreement_short() + '\n' + trailer
result = self.grammar.parseString(record)[0]
self.assertEqual('GRH', result.group_header.record_type)
self.assertEqual('GRT', result.group_trailer.record_type)
transaction = result.transactions[0]
self.assertEqual('AGR', transaction[0].record_type)
def test_agreement_full(self):
header = 'GRHAGR0123402.100123456789 '
trailer = 'GRT012340123456701234567 '
agreement_record_1 = _agreement_record_big()
agreement_record_2 = _agreement_record_big()
record = header + '\n' + agreement_record_1 + '\n' + agreement_record_2 + '\n' + trailer
result = self.grammar.parseString(record)[0]
self.assertEqual('GRH', result.group_header.record_type)
self.assertEqual('GRT', result.group_trailer.record_type)
transactions = result.transactions
self.assertEqual(2, len(transactions))
transaction = transactions[0]
self.assertEqual(21, len(transaction))
self.assertEqual('AGR', transaction[0].record_type)
self.assertEqual('TER', transaction[1].record_type)
self.assertEqual('TER', transaction[2].record_type)
self.assertEqual('IPA', transaction[3].record_type)
self.assertEqual('NPA', transaction[4].record_type)
self.assertEqual('IPA', transaction[5].record_type)
self.assertEqual('NPA', transaction[6].record_type)
self.assertEqual('IPA', transaction[7].record_type)
self.assertEqual('NPA', transaction[8].record_type)
self.assertEqual('IPA', transaction[9].record_type)
self.assertEqual('NPA', transaction[10].record_type)
self.assertEqual('TER', transaction[11].record_type)
self.assertEqual('TER', transaction[12].record_type)
self.assertEqual('IPA', transaction[13].record_type)
self.assertEqual('NPA', transaction[14].record_type)
self.assertEqual('IPA', transaction[15].record_type)
self.assertEqual('NPA', transaction[16].record_type)
self.assertEqual('IPA', transaction[17].record_type)
self.assertEqual('NPA', transaction[18].record_type)
self.assertEqual('IPA', transaction[19].record_type)
self.assertEqual('NPA', transaction[20].record_type)
class TestGroupInformationGrammarException(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('group_info')
def test_empty(self):
record = ''
self.assertRaises(ParseException, self.grammar.parseString, record)
def test_invalid(self):
record = 'This is an invalid string'
self.assertRaises(ParseException, self.grammar.parseString, record)
def _work_big():
return 'NWR0000019900000000WORK NAME 1450455 00000000 UNC000000YMTX ORI ORIORI N00000000000U Y' + '\n' + \
'SPU0000019900000702014271370 MUSIC SOCIETY E 005101734040102328568410061 0500061 1000061 10000 0000000000000 OS ' + '\n' + \
'SPU00000199000007030166 ANOTHER SOCIETY AM 002501650060477617137010061 0000061 0000061 00000 0000000000000 PS ' + '\n' + \
'SPU00000199000007040170 YET ANOTHER SOCIETY SE 002261445930035870006610059 00000 00000 00000 0000000000000 PG ' + '\n' + \
'SPT000001990000070570 050000500005000I0484Y001' + '\n' + \
'SWR00000199000007061185684 A NAME YET ANOTHER NAME C 0026058307861 0500061 0000061 00000 0000260582865 ' + '\n' + \
'SWT00000199000007071185684 050000500005000I0484Y001' + '\n' + \
'PWR00000199000007084271370 MUSIC SOCIETY 01023285684100 1185684 ' + '\n' + \
'PER0000019900000709A NAME 000000000000000000000000' + '\n' + \
'REC000001990000071019980101 000300 A COMPILATION P A I _AR_ 33002 U '
def _agreement_record_big():
agreement = 'AGR0000123400000023C1234567890123D1234567890123OG201201022013020320140304D20100405D201605062017060701234MYY0123456789012A'
territory_1 = 'TER0000123400000023I0020'
territory_2 = 'TER0000123400000023I0020'
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
assignor_1 = ipa + '\n' + npa
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
assignor_2 = ipa + '\n' + npa
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
acquirer_1 = ipa + '\n' + npa
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
acquirer_2 = ipa + '\n' + npa
agr_territory_1 = territory_1 + '\n' + territory_2 + '\n' + assignor_1 + '\n' + assignor_2 + '\n' + acquirer_1 + '\n' + acquirer_2
territory_1 = 'TER0000123400000023I0020'
territory_2 = 'TER0000123400000023I0020'
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
assignor_1 = ipa + '\n' + npa
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
assignor_2 = ipa + '\n' + npa
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
acquirer_1 = ipa + '\n' + npa
ipa = 'IPA0000123400000023AC01234567890I-000000229-7A12345678LAST NAME FIRST NAME 009020500100300001102312'
npa = 'NPA0000123400000023012345678PARTY NAME PARTY WRITER NAME ES'
acquirer_2 = ipa + '\n' + npa
agr_territory_2 = territory_1 + '\n' + territory_2 + '\n' + assignor_1 + '\n' + assignor_2 + '\n' + acquirer_1 + '\n' + acquirer_2
return agreement + '\n' + agr_territory_1 + '\n' + agr_territory_2
def _agreement_short():
agr_1 = 'AGR000000000000000000023683606100 OS200311182013111820131118N D20131118 00009SYY '
ter_1_1 = 'TER0000000000000000I2136'
ipa_1_1 = 'IPA0000000000000001AS0026166137500000000000001183606 ITALIAN GILBERTI DUANTE 61 0500061 0000061 00000'
ipa_1_2 = 'IPA0000000000000002AC00250165006000000000000066 SOCIETY MUSIC 61 0500061 1000061 10000'
return agr_1 + '\n' + ter_1_1 + '\n' + ipa_1_1 + '\n' + ipa_1_2
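# A hedged convenience entry point (not in the original file): these test cases
# can be run directly with the standard unittest runner, assuming the
# CWR-DataApi package and its tests.utils helpers are importable.
if __name__ == '__main__':
    unittest.main()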
| 51.189781
| 362
| 0.519179
| 1,047
| 14,026
| 6.774594
| 0.169054
| 0.122656
| 0.086846
| 0.155082
| 0.761032
| 0.751022
| 0.743127
| 0.743127
| 0.725927
| 0.725927
| 0
| 0.246113
| 0.403821
| 14,026
| 273
| 363
| 51.377289
| 0.602129
| 0.001497
| 0
| 0.740741
| 0
| 0
| 0.497198
| 0.139819
| 0
| 0
| 0
| 0
| 0.37037
| 1
| 0.074074
| false
| 0
| 0.018519
| 0.006173
| 0.123457
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9fe7a6103019700f5924a45bc7f5b5267ff68577
| 1,549
|
py
|
Python
|
stayhome/geodata/migrations/0009_auto_20200319_1439.py
|
pavax/stayhomech
|
e661e042f2976bf380dc71a42f99930ce009f724
|
[
"MIT"
] | 3
|
2020-03-20T11:01:57.000Z
|
2020-03-20T16:29:12.000Z
|
stayhome/geodata/migrations/0009_auto_20200319_1439.py
|
pavax/stayhomech
|
e661e042f2976bf380dc71a42f99930ce009f724
|
[
"MIT"
] | 74
|
2020-03-23T21:35:07.000Z
|
2020-04-27T12:55:50.000Z
|
stayhome/geodata/migrations/0009_auto_20200319_1439.py
|
pavax/stayhomech
|
e661e042f2976bf380dc71a42f99930ce009f724
|
[
"MIT"
] | 3
|
2020-03-20T11:02:35.000Z
|
2020-03-20T16:29:23.000Z
|
# Generated by Django 3.0.4 on 2020-03-19 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geodata', '0008_auto_20200315_1940'),
]
operations = [
migrations.AddField(
model_name='municipality',
name='name_de',
field=models.CharField(max_length=30, null=True),
),
migrations.AddField(
model_name='municipality',
name='name_en',
field=models.CharField(max_length=30, null=True),
),
migrations.AddField(
model_name='municipality',
name='name_fr',
field=models.CharField(max_length=30, null=True),
),
migrations.AddField(
model_name='municipality',
name='name_it',
field=models.CharField(max_length=30, null=True),
),
migrations.AddField(
model_name='npa',
name='name_de',
field=models.CharField(max_length=27, null=True),
),
migrations.AddField(
model_name='npa',
name='name_en',
field=models.CharField(max_length=27, null=True),
),
migrations.AddField(
model_name='npa',
name='name_fr',
field=models.CharField(max_length=27, null=True),
),
migrations.AddField(
model_name='npa',
name='name_it',
field=models.CharField(max_length=27, null=True),
),
]
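# A hedged usage sketch (not part of the generated migration): the migration
# can be applied programmatically, assuming DJANGO_SETTINGS_MODULE points at a
# configured settings module. The helper name below is hypothetical.
def _apply_geodata_translations():
    from django.core.management import call_command
    call_command('migrate', 'geodata', '0009_auto_20200319_1439')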
| 28.685185
| 61
| 0.545513
| 157
| 1,549
| 5.210191
| 0.273885
| 0.176039
| 0.224939
| 0.264059
| 0.816626
| 0.816626
| 0.816626
| 0.768949
| 0.691932
| 0.678484
| 0
| 0.045587
| 0.334409
| 1,549
| 53
| 62
| 29.226415
| 0.747818
| 0.029051
| 0
| 0.851064
| 1
| 0
| 0.097204
| 0.015313
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b00857fe1f229782497c52b5af1cef1a9c427432
| 13,256
|
py
|
Python
|
findfile/find.py
|
yangheng95/findfile
|
38d2e0c0ec4234bc1664b84cb2532b4f920bd46c
|
[
"MIT"
] | 4
|
2021-11-12T09:41:42.000Z
|
2022-03-23T03:32:51.000Z
|
findfile/find.py
|
yangheng95/findfile
|
38d2e0c0ec4234bc1664b84cb2532b4f920bd46c
|
[
"MIT"
] | null | null | null |
findfile/find.py
|
yangheng95/findfile
|
38d2e0c0ec4234bc1664b84cb2532b4f920bd46c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# file: find.py
# time: 2021/8/4
# author: yangheng <yangheng@m.scnu.edu.cn>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import os
import re
from functools import reduce
def accessible(search_path):
try:
os.listdir(search_path)
except OSError:
return False
return True
def covert_path_sep(key_list):
new_key_list = []
for key in key_list:
new_key_list.append(key.replace('/', os.sep))
return new_key_list
def find_files(search_path: str,
key='',
exclude_key=None,
use_regex=False,
recursive=True,
return_relative_path=True) -> list:
'''
'search_path': path to search
'key': find a set of files/dirs whose absolute path contain the 'key'
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
:return the files whose path contains the key(s)
'''
if not use_regex:
key = covert_path_sep(key)
if not search_path:
search_path = os.getcwd()
res = []
if not exclude_key:
exclude_key = []
if isinstance(exclude_key, str):
exclude_key = [exclude_key]
if isinstance(key, str):
key = [key]
if os.path.isfile(search_path):
has_key = True
for k in key:
try:
if use_regex:
if not re.findall(k.lower(), search_path.lower()):
has_key = False
break
else:
if not k.lower() in search_path.lower():
has_key = False
break
except re.error:
print('Regex pattern error, using string-based search')
if not k.lower() in search_path.lower():
has_key = False
break
if has_key:
if exclude_key:
has_exclude_key = False
for ex_key in exclude_key:
try:
if use_regex:
if re.findall(ex_key.lower(), search_path.lower()):
has_exclude_key = True
break
else:
if ex_key.lower() in search_path.lower():
has_exclude_key = True
break
except re.error:
print('Regex pattern error, using string-based search')
if ex_key.lower() in search_path.lower():
has_exclude_key = True
break
if not has_exclude_key:
res.append(search_path.replace(os.getcwd() + os.sep, '') if return_relative_path else search_path)
else:
res.append(search_path.replace(os.getcwd() + os.sep, '') if return_relative_path else search_path)
if os.path.isdir(search_path) and accessible(search_path):
items = os.listdir(search_path)
for file in items:
if recursive:
res += find_files(os.path.join(search_path, file), key, exclude_key, use_regex=use_regex, recursive=recursive, return_relative_path=return_relative_path)
return res
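# Usage sketch for find_files above (illustration only): collect all Python
# files under the current directory whose paths mention "test", skipping any
# __pycache__ entries. Entries in the key list are ANDed (every key must occur
# in the path); entries in exclude_key are ORed (any match discards the path).
#   matches = find_files(os.getcwd(), key=['test', '.py'],
#                        exclude_key='__pycache__')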
def find_file(search_path: str,
key='',
exclude_key=None,
use_regex=False,
recursive=True,
return_relative_path=True,
return_deepest_path=False,
disable_alert=False) -> str:
'''
'search_path': path to search
'key': find a set of files/dirs whose absolute path contain the 'key'
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
'return_deepest_path' True/False to return the deepest/shortest path if multiple targets found
'disable_alert' no alert if multiple targets found
:return the file whose path contains the key(s)
'''
res = find_files(search_path=search_path,
key=key,
exclude_key=exclude_key,
use_regex=use_regex,
recursive=recursive,
return_relative_path=return_relative_path)
if len(res) > 1 and not disable_alert:
print('FindFile Warning: multiple targets {} found but return the {} path'.format(res, 'deepest' if return_deepest_path else 'shortest'))
if not return_deepest_path:
return reduce(lambda x, y: x if len(x) < len(y) else y, res) if res else None
else:
return reduce(lambda x, y: x if len(x) > len(y) else y, res) if res else None
def find_dirs(search_path: str,
key='',
exclude_key=None,
use_regex=False,
recursive=True,
return_relative_path=True) -> list:
'''
'search_path': path to search
'key': find a set of files/dirs whose absolute path contain the 'key'
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
:return the dirs whose path contains the key(s)
'''
if not use_regex:
key = covert_path_sep(key)
if not search_path:
search_path = os.getcwd()
res = []
if not exclude_key:
exclude_key = []
if isinstance(exclude_key, str):
exclude_key = [exclude_key]
if isinstance(key, str):
key = [key]
if os.path.isdir(search_path):
has_key = True
for k in key:
try:
if use_regex:
if not re.findall(k.lower(), search_path.lower()):
has_key = False
break
else:
if not k.lower() in search_path.lower():
has_key = False
break
except re.error:
print('Regex pattern error, using string-based search')
if not k.lower() in search_path.lower():
has_key = False
break
if has_key:
if exclude_key:
has_exclude_key = False
for ex_key in exclude_key:
try:
if use_regex:
if re.findall(ex_key.lower(), search_path.lower()):
has_exclude_key = True
break
else:
if ex_key.lower() in search_path.lower():
has_exclude_key = True
break
except re.error:
print('Regex pattern error, using string-based search')
if ex_key.lower() in search_path.lower():
has_exclude_key = True
break
if not has_exclude_key:
res.append(search_path.replace(os.getcwd() + os.sep, '') if return_relative_path else search_path)
else:
res.append(search_path.replace(os.getcwd() + os.sep, '') if return_relative_path else search_path)
if os.path.isdir(search_path) and accessible(search_path):
items = os.listdir(search_path)
for file in items:
if recursive:
res += find_dirs(os.path.join(search_path, file), key, exclude_key, use_regex, recursive, return_relative_path)
return res
def find_dir(search_path: str,
key='',
exclude_key=None,
use_regex=False,
recursive=True,
return_relative_path=True,
return_deepest_path=False,
disable_alert=False) -> str:
'''
'search_path': path to search
'key': find a set of files/dirs whose absolute path contain the 'key'
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
'return_deepest_path' True/False to return the deepest/shortest path if multiple targets found
'disable_alert' no alert if multiple targets found
:return the dir path
'''
res = find_dirs(search_path=search_path,
key=key,
exclude_key=exclude_key,
use_regex=use_regex,
recursive=recursive,
return_relative_path=return_relative_path)
if len(res) > 1 and not disable_alert:
print('FindFile Warning: multiple targets {} found but return the {} path'.format(res, 'deepest' if return_deepest_path else 'shortest'))
if not return_deepest_path:
return reduce(lambda x, y: x if len(x) < len(y) else y, res) if res else None
else:
return reduce(lambda x, y: x if len(x) > len(y) else y, res) if res else None
def find_cwd_file(key='',
use_regex=False,
exclude_key=None,
recursive=True,
return_relative_path=True,
return_deepest_path=False,
disable_alert=False):
'''
'key': find a set of files/dirs whose absolute path contain the 'key'
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
'return_deepest_path' True/False to return the deepest/shortest path if multiple targets found
'disable_alert' no alert if multiple targets found
:return the target file path in current working directory
'''
return find_file(search_path=os.getcwd(),
key=key,
use_regex=use_regex,
exclude_key=exclude_key,
recursive=recursive,
return_relative_path=return_relative_path,
return_deepest_path=return_deepest_path,
disable_alert=disable_alert)
def find_cwd_files(key='',
use_regex=False,
exclude_key=None,
recursive=True,
return_relative_path=True):
'''
'key': find a set of files/dirs whose absolute path contain the 'key'
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
:return the target files' path in current working directory
'''
return find_files(search_path=os.getcwd(),
key=key,
exclude_key=exclude_key,
use_regex=use_regex,
recursive=recursive,
return_relative_path=return_relative_path)
def find_cwd_dir(key='',
use_regex=False,
exclude_key=None,
recursive=True,
return_relative_path=True,
return_deepest_path=False,
disable_alert=False):
'''
'key': find a set of files/dirs whose absolute path contain the 'key',
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
'return_deepest_path' True/False to return the deepest/shortest path if multiple targets found
'disable_alert' no alert if multiple targets found
:return the target dir path in current working directory
'''
return find_dir(search_path=os.getcwd(),
key=key,
use_regex=use_regex,
exclude_key=exclude_key,
recursive=recursive,
return_relative_path=return_relative_path,
return_deepest_path=return_deepest_path,
disable_alert=disable_alert)
def find_cwd_dirs(key='',
exclude_key=None,
use_regex=False,
recursive=True,
return_relative_path=True):
'''
'key': find a set of files/dirs whose absolute path contain the 'key'
'exclude_key': file whose absolute path contains 'exclude_key' will be ignored
'recursive' recursive search in dir_path
'return_relative_path' return the relative path instead of absolute path
:return the target dirs' path in current working directory
'''
return find_dirs(search_path=os.getcwd(),
key=key,
exclude_key=exclude_key,
use_regex=use_regex,
recursive=recursive,
return_relative_path=return_relative_path)
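# Usage sketch (illustration only, assuming this module is importable as
# `findfile`): regex search for a checkpoint file in the current working
# directory, suppressing the multiple-target alert and returning the
# shortest matching path.
#   from findfile import find_cwd_file
#   ckpt = find_cwd_file(key=r'\.ckpt$', use_regex=True,
#                        exclude_key='backup', disable_alert=True)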
| 37.659091
| 145
| 0.570081
| 1,585
| 13,256
| 4.564038
| 0.075079
| 0.085706
| 0.079624
| 0.053083
| 0.938347
| 0.928808
| 0.9248
| 0.903235
| 0.903235
| 0.903235
| 0
| 0.00176
| 0.357121
| 13,256
| 351
| 146
| 37.766382
| 0.847102
| 0.255658
| 0
| 0.865217
| 0
| 0
| 0.036278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.013043
| 0
| 0.113043
| 0.026087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c6f7ab392868016be881d157d5be90fc093a9add
| 101
|
py
|
Python
|
common_utils/__init__.py
|
drsh4rky/python-hacking
|
798c2cddd8cb7a89ed310fbdb2c63ed9467c4048
|
[
"MIT"
] | 32
|
2019-08-11T12:48:06.000Z
|
2022-02-24T03:07:12.000Z
|
common_utils/__init__.py
|
drsh4rky/python-hacking
|
798c2cddd8cb7a89ed310fbdb2c63ed9467c4048
|
[
"MIT"
] | null | null | null |
common_utils/__init__.py
|
drsh4rky/python-hacking
|
798c2cddd8cb7a89ed310fbdb2c63ed9467c4048
|
[
"MIT"
] | 8
|
2019-12-28T07:45:07.000Z
|
2022-02-25T09:28:08.000Z
|
from . import file_utils, menu_utils, var_utils
__all__ = ["file_utils", "menu_utils", "var_utils"]
| 25.25
| 51
| 0.742574
| 15
| 101
| 4.333333
| 0.466667
| 0.276923
| 0.4
| 0.553846
| 0.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118812
| 101
| 3
| 52
| 33.666667
| 0.730337
| 0
| 0
| 0
| 0
| 0
| 0.287129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
af0b7f62849a36a656c6212a003b95896e4fed58
| 71,048
|
py
|
Python
|
loss.py
|
vanyoleyang/llll
|
ca2b880e1056af66a69d8c46c2f15c63904055f2
|
[
"MIT"
] | null | null | null |
loss.py
|
vanyoleyang/llll
|
ca2b880e1056af66a69d8c46c2f15c63904055f2
|
[
"MIT"
] | null | null | null |
loss.py
|
vanyoleyang/llll
|
ca2b880e1056af66a69d8c46c2f15c63904055f2
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
from utils import displayImage, displayMask
class FHADLoss(nn.Module):
def __init__(self, args):
super(FHADLoss, self).__init__()
# Initialize Parameters
self.args = args
self.alpha_2d = 2
self.alpha_3d = 10 #100
self.alpha_mask = 10 # 100
self.alpha_reg = 0#10
self.alpha_beta = 0#10000
self.alpha_camera = 1
self.img_size = 224
def forward(self, epoch, mask, predictions, targets, train=True):
x2d_pred, x3d_pred, _, theta, beta = predictions
joint_2d_targ, joint_3d_targ, _, _, _ = targets
# Initialize Variables
batch_size, seq_size, _ = x2d_pred.size()
joint_3d_pred = torch.stack((x3d_pred[:, :, :63:3], x3d_pred[:, :, 1:63:3], x3d_pred[:, :, 2:63:3]),
dim=3) # out2[:, :21, :]
joint_3d_pred, pred_min, pred_max = self.normalize_joints_scale(joint_3d_pred)
joint_3d_targ, targ_min, targ_max = self.normalize_joints_scale(joint_3d_targ)
_, _, maxp = self.normalize_joints_scale(joint_3d_pred)
_, _, maxt = self.normalize_joints_scale(joint_3d_targ)
joint_3d_pred = self.center_joints_scale(joint_3d_pred, maxp)
joint_3d_targ = self.center_joints_scale(joint_3d_targ, maxt)
joint_3d_pred, joint_3d_targ = joint_3d_pred[:, -1, :, :], joint_3d_targ[:, -1, :, :]
joint_2d_pred = torch.stack((x2d_pred[:, -1, :42:2], x2d_pred[:, -1, 1:42:2]), dim=2) # x_hat
joint_2d_targ = joint_2d_targ[:, -1, :, :]
loss_2d = torch.abs((joint_2d_pred.view(batch_size, -1) / self.img_size
- joint_2d_targ.view(batch_size, -1) / self.img_size)).sum(1).mean()
loss_2d = self.alpha_2d * loss_2d
diff_2d = joint_2d_pred.view(batch_size, -1, 2) - joint_2d_targ.view(batch_size, -1,2)
diff_3d = joint_3d_pred.view(batch_size, -1, 3) - joint_3d_targ.view(batch_size, -1,3)
loss_3d = torch.pow(diff_3d.view(batch_size, -1), 2).sum(1).mean()
diff_3d = diff_3d * (pred_max - pred_min).repeat(1, 1, 21, 1)[:, -1, :, :].view(batch_size, -1, 3)
loss_3d = self.alpha_3d * loss_3d
theta_prev = torch.cat((theta[:, 0, :].unsqueeze(1), theta[:, :-1, :]), 1)
beta_prev = torch.cat((beta[:, 0, :].unsqueeze(1), beta[:, :-1, :]), 1)
pose_temp_loss = torch.pow(theta_prev.view(batch_size * seq_size, -1) - theta.view(batch_size * seq_size, -1), 2).sum(1).mean()
shape_temp_loss = torch.pow(beta_prev.view(batch_size * seq_size, -1) - beta.view(batch_size * seq_size, -1), 2).sum(1).mean()
loss_temp = 0.0005 * ( 0.1 * pose_temp_loss + 1. * shape_temp_loss)
loss_mask = torch.zeros(1).to(self.args.device)
loss_reg = torch.zeros(1).to(self.args.device)
loss_camera = torch.zeros(1).to(self.args.device)
loss = loss_2d + loss_3d + loss_mask + loss_reg + loss_camera + loss_temp
# Initialize Average Distance Storage
avg_distance_2d = list()
avg_distance_3d = list()
for _ in range(self.args.n_kps):
avg_distance_2d.append(None)
avg_distance_3d.append(None)
# Calculate euclidean distance
euclidean_dist_2d = np.sqrt(np.sum(np.square(diff_2d.detach().cpu().numpy()), axis=2))
euclidean_dist_3d = np.sqrt(np.sum(np.square(diff_3d.detach().cpu().numpy()), axis=2))
for i in range(self.args.n_kps):
avg_distance_2d[i] = euclidean_dist_2d[:, i]
avg_distance_3d[i] = euclidean_dist_3d[:, i]
return loss, [loss_2d.item(), loss_3d.item(), loss_temp.item(), loss_reg.item(), loss_camera.item(), avg_distance_2d, avg_distance_3d]
def normalize_joints_scale(self, hand_joints):
min_joints, _ = torch.min(hand_joints, dim=2, keepdim=True)
max_joints, _ = torch.max(hand_joints, dim=2, keepdim=True)
hand_joints[:, :, :, 0] = (hand_joints[:, :, :, 0] - min_joints[:, :, :, 0]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
hand_joints[:, :, :, 1] = (hand_joints[:, :, :, 1] - min_joints[:, :, :, 1]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
hand_joints[:, :, :, 2] = (hand_joints[:, :, :, 2] - min_joints[:, :, :, 2]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
return hand_joints, min_joints, max_joints
def center_joints_scale(self, hand_joints, max_joints):
hand_joints[:, :, :, 0] = hand_joints[:, :, :, 0] - max_joints[:, :, :, 0]
hand_joints[:, :, :, 1] = hand_joints[:, :, :, 1] - max_joints[:, :, :, 1]
hand_joints[:, :, :, 2] = hand_joints[:, :, :, 2] - max_joints[:, :, :, 2]
return hand_joints
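# A minimal CPU smoke-test sketch for FHADLoss (illustration only; the tensor
# shapes are inferred from the slicing in forward() above, the theta/beta sizes
# are assumed, and the SimpleNamespace is a hypothetical stand-in for the
# project's argument object).
def _fhad_loss_smoke_test():
    from types import SimpleNamespace
    args = SimpleNamespace(device='cpu', n_kps=21)
    criterion = FHADLoss(args)
    B, S = 2, 4
    predictions = (torch.randn(B, S, 42),   # x2d_pred: 21 (x, y) joints
                   torch.randn(B, S, 63),   # x3d_pred: 21 (x, y, z) joints
                   None,
                   torch.randn(B, S, 10),   # theta (MANO pose; size assumed)
                   torch.randn(B, S, 10))   # beta (MANO shape; size assumed)
    targets = (torch.randn(B, S, 21, 2),    # joint_2d_targ
               torch.randn(B, S, 21, 3),    # joint_3d_targ
               None, None, None)
    loss, parts = criterion(epoch=0, mask=None, predictions=predictions,
                            targets=targets)
    return loss, parts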
class Hand3DLoss_wKLD(nn.Module):
def __init__(self, args, pretrain=False):
super(Hand3DLoss_wKLD, self).__init__()
# Initialize Parameters
self.args = args
self.alpha_2d = 5
self.alpha_3d = 10 #100
self.alpha_mask = 10 # 100
self.alpha_reg = 0#10
self.alpha_beta = 0#10000
self.alpha_camera = 1
self.alpha_kld = 0.00001
self.n_meshes = 778
self.img_size = 224
def forward(self, epoch, mask, predictions, targets):
# Initialize predictions
x2d_pred, x3d_pred, camera_param_pred, theta, beta, mu, logvar = predictions
# Initialize targets
joint_2d_target, joint_3d_target, verts_3d_target, camera_param_target, dataset_type = targets
# Initialize Variables
batch_size, seq_size, _ = x2d_pred.size()
# Get Vectors
joint_2d_pred = torch.stack((x2d_pred[:, :, :42:2], x2d_pred[:, :, 1:42:2]), dim=3) # x_hat
y_hat = x2d_pred[:, :, 42:].view(batch_size, seq_size, 778, 2)
joint_3d_pred = torch.stack((x3d_pred[:, :, :63:3], x3d_pred[:, :, 1:63:3], x3d_pred[:, :, 2:63:3]), dim=3) # out2[:, :21, :]
# KLD loss
loss_kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
loss_kld = self.alpha_kld * loss_kld
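# This is the standard closed-form KL divergence between the approximate
# posterior N(mu, sigma^2) and a unit Gaussian prior used in VAEs:
# KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), summed over the latent
# dimensions and the batch.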
# Calculate the Losses - 2D joint re-projection loss
loss_2d = torch.abs((joint_2d_pred.view(batch_size*seq_size, -1) / self.img_size - joint_2d_target.view(batch_size*seq_size, -1) / self.img_size)).sum(1).mean()
loss_2d = self.alpha_2d * loss_2d
# Calculate the Losses - 3D joint loss (Only the STEREO dataset)
# print(joint_3d_pred[0, 0, 0, :], joint_3d_target[0, 0, 0, :])
joint_3d_pred, pred_min, pred_max = self.normalize_joints_scale(joint_3d_pred)
joint_3d_target, targ_min, targ_max = self.normalize_joints_scale(joint_3d_target)
_, _, maxp = self.normalize_joints_scale(joint_3d_pred)
_, _, maxt = self.normalize_joints_scale(joint_3d_target)
joint_3d_pred = self.center_joints_scale(joint_3d_pred, maxp)
joint_3d_target = self.center_joints_scale(joint_3d_target, maxt)
# self.draw_3d_mano_pose(joint_3d_pred[0, 0, :, :], joint_3d_target[0, 0, :, :])
diff_3d = joint_3d_pred.view(batch_size * seq_size, -1, 3) - joint_3d_target.view(batch_size * seq_size, -1, 3)
loss_3d = torch.pow(diff_3d.view(batch_size * seq_size, -1), 2).sum(1).mean()
diff_3d = diff_3d * (pred_max - pred_min).repeat(1, 1, 21, 1).view(batch_size * seq_size, -1, 3)
loss_3d = self.alpha_3d * loss_3d
theta_prev = torch.cat((theta[:, 0, :].unsqueeze(1), theta[:, :-1, :]), 1)
beta_prev = torch.cat((beta[:, 0, :].unsqueeze(1), beta[:, :-1, :]), 1)
pose_temp_loss = torch.pow(theta_prev.view(batch_size * seq_size, -1) - theta.view(batch_size * seq_size, -1),
2).sum(1).mean()
shape_temp_loss = torch.pow(beta_prev.view(batch_size * seq_size, -1) - beta.view(batch_size * seq_size, -1),
2).sum(1).mean()
# print('theta_temp loss', pose_temp_loss, 'beta_temp_loss', shape_temp_loss)
if 'ConvLSTM' in self.args.model_name :
loss_temp = ( 0.05*pose_temp_loss + 100. * shape_temp_loss)
# loss_temp *= 0.
else : loss_temp = torch.zeros(1).cuda()
# Calculate the Losses - Hand mask loss
loss_mask = self.getHandMask(y_hat, mask)
loss_mask = self.alpha_mask * loss_mask
# Calculate the Losses - Regularization loss
loss_reg = torch.zeros(1).cuda()
# Calculate the Losses - Camera Parameter Loss
if camera_param_target.sum().abs().item() > 0:
if dataset_type[0] == 7 :
loss_camera = torch.nn.functional.mse_loss(
camera_param_pred[:, :, 16:26].view(batch_size * seq_size, -1),
camera_param_target[:, :, 16:26].view(batch_size * seq_size, -1))
else :
loss_camera_scale = torch.nn.functional.mse_loss(camera_param_pred[:, :, 0:1].view(batch_size*seq_size, -1), camera_param_target[:, :, 0:1].view(batch_size*seq_size, -1))
loss_camera_trans = torch.nn.functional.mse_loss(camera_param_pred[:, :, 1:3].view(batch_size*seq_size, -1), camera_param_target[:, :, 1:3].view(batch_size*seq_size, -1))
loss_camera_rot = torch.nn.functional.mse_loss(camera_param_pred[:, :, 3:6].view(batch_size*seq_size, -1), camera_param_target[:, :, 3:6].view(batch_size*seq_size, -1))
loss_camera_theta = torch.nn.functional.mse_loss(camera_param_pred[:, :, 6:16].view(batch_size*seq_size, -1), camera_param_target[:, :, 6:16].view(batch_size*seq_size, -1))
loss_camera_beta = torch.nn.functional.mse_loss(camera_param_pred[:, :, 16:26].view(batch_size*seq_size, -1), camera_param_target[:, :, 16:26].view(batch_size*seq_size, -1))
loss_camera = loss_camera_scale + loss_camera_trans + loss_camera_rot + loss_camera_theta + loss_camera_beta
theta_dif = np.sum(np.square((camera_param_pred[:, :, 6:16].view(batch_size, seq_size, -1) -
camera_param_target[:, :, 6:16].view(batch_size, seq_size, -1) ).detach().cpu().numpy()), axis=0)
beta_dif = np.sum(np.square((camera_param_pred[:, :, 16:26].view(batch_size, seq_size, -1) -
camera_param_target[:, :, 16:26].view(batch_size, seq_size, -1) ).detach().cpu().numpy()), axis=0)
print('theta ', theta_dif,'\n', 'beta ', beta_dif)
loss_camera = self.alpha_camera * loss_camera
else:
loss_camera = torch.zeros(1).cuda()
# Weighted sum
loss = loss_2d + loss_3d + loss_mask + loss_reg + loss_camera + loss_temp + loss_kld
# Initialize Average Distance Storage
avg_distance_2d = list()
avg_distance_3d = list()
for _ in range(self.args.n_kps):
avg_distance_2d.append(None)
avg_distance_3d.append(None)
# Calculate euclidean distance
diff_2d = joint_2d_pred.view(batch_size*seq_size, -1, 2) - joint_2d_target.view(batch_size*seq_size, -1, 2)
euclidean_dist_2d = np.sqrt(np.sum(np.square(diff_2d.detach().cpu().numpy()), axis=2))
euclidean_dist_3d = np.sqrt(np.sum(np.square(diff_3d.detach().cpu().numpy()), axis=2))
for i in range(self.args.n_kps):
avg_distance_2d[i] = euclidean_dist_2d[:, i]
avg_distance_3d[i] = euclidean_dist_3d[:, i]
return loss, [loss_2d.item(), loss_3d.item(), loss_mask.item(), loss_kld.item() , loss_camera.item(), avg_distance_2d, avg_distance_3d]
def getHandMask(self, y_hat, mask):
batch_size, seq_size, _, h, w = mask.size()
loss_mask = torch.ones(batch_size, seq_size, 1).cuda()
y_hat = y_hat.round().long()
y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] >= w, torch.tensor(w-1, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] >= h, torch.tensor(h-1, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
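# The double loop below subtracts, per frame, the fraction of the 778
# projected mesh vertices (y_hat, clamped to the image bounds above) that
# land on the ground-truth hand silhouette; loss_mask is therefore close to
# zero when every vertex projects inside the mask.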
for i_batch in range(batch_size):
for i_seq in range(seq_size):
loss_mask[i_batch, i_seq] = loss_mask[i_batch, i_seq] - mask[i_batch, i_seq, 0, y_hat[i_batch, i_seq, :, 1], y_hat[i_batch, i_seq, :, 0]].sum()/self.n_meshes
return loss_mask.mean()
def normalize_joints_scale(self, hand_joints):
min_joints, _ = torch.min(hand_joints, dim=2, keepdim=True)
max_joints, _ = torch.max(hand_joints, dim=2, keepdim=True)
hand_joints[:, :, :, 0] = (hand_joints[:, :, :, 0] - min_joints[:, :, :, 0]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
hand_joints[:, :, :, 1] = (hand_joints[:, :, :, 1] - min_joints[:, :, :, 1]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
hand_joints[:, :, :, 2] = (hand_joints[:, :, :, 2] - min_joints[:, :, :, 2]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
return hand_joints, min_joints, max_joints
def center_joints_scale(self, hand_joints, max_joints):
hand_joints[:, :, :, 0] = hand_joints[:, :, :, 0] - max_joints[:, :, :, 0]
hand_joints[:, :, :, 1] = hand_joints[:, :, :, 1] - max_joints[:, :, :, 1]
hand_joints[:, :, :, 2] = hand_joints[:, :, :, 2] - max_joints[:, :, :, 2]
return hand_joints
class Hand3DLoss(nn.Module):
def __init__(self, args, pretrain=False):
super(Hand3DLoss, self).__init__()
# Initialize Parameters
self.args = args
self.pretrain = pretrain
if self.pretrain:
self.alpha_2d = 0
self.alpha_3d = 0
self.alpha_mask = 0
self.alpha_reg = 0
self.alpha_beta = 0
self.alpha_camera = 1
else:
self.alpha_2d = 5
self.alpha_3d = 100 #100
self.alpha_mask = 10 # 100
self.alpha_reg = 0#10
self.alpha_beta = 0#10000
self.alpha_camera = 1
self.n_meshes = 778
self.img_size = 224
def getRampUpScale(self, epoch):
if self.pretrain:
return torch.ones(1).cuda()
else:
return torch.ones(1).cuda()
# return torch.FloatTensor([(epoch+1) / self.args.max_epochs_ramp_up]).cuda()
def forward(self, epoch, mask, predictions, targets):
# Initialize RampUp Scale
rampup_scale = self.getRampUpScale(epoch)
# Initialize predictions
x2d_pred, x3d_pred, camera_param_pred, theta, beta = predictions
# Initialize targets
joint_2d_target, joint_3d_target, verts_3d_target, camera_param_target, dataset_type = targets
# Initialize Variables
batch_size, seq_size, _ = x2d_pred.size()
# Get Vectors
joint_2d_pred = torch.stack((x2d_pred[:, :, :42:2], x2d_pred[:, :, 1:42:2]), dim=3) # x_hat
y_hat = x2d_pred[:, :, 42:].view(batch_size, seq_size, 778, 2)
joint_3d_pred = torch.stack((x3d_pred[:, :, :63:3], x3d_pred[:, :, 1:63:3], x3d_pred[:, :, 2:63:3]), dim=3) # out2[:, :21, :]
# Calculate the Losses - 2D joint re-projection loss
loss_2d = torch.abs((joint_2d_pred.view(batch_size*seq_size, -1) / self.img_size - joint_2d_target.view(batch_size*seq_size, -1) / self.img_size)).sum(1).mean()
loss_2d = rampup_scale * self.alpha_2d * loss_2d
# Calculate the Losses - 3D joint loss (Only the STEREO dataset)
# print(joint_3d_pred[0, 0, 0, :], joint_3d_target[0, 0, 0, :])
# joint_3d_pred, pred_min, pred_max = self.normalize_joints_scale(joint_3d_pred)
# joint_3d_target, targ_min, targ_max = self.normalize_joints_scale(joint_3d_target)
# _, _, maxp = self.normalize_joints_scale(joint_3d_pred)
# _, _, maxt = self.normalize_joints_scale(joint_3d_target)
# joint_3d_pred = self.center_joints_scale(joint_3d_pred, maxp)
# joint_3d_target = self.center_joints_scale(joint_3d_target,maxt )
# self.draw_3d_mano_pose(joint_3d_pred[0, 0, :, :], joint_3d_target[0, 0, :, :])
diff_3d = joint_3d_pred.view(batch_size * seq_size, -1, 3) - joint_3d_target.view(batch_size * seq_size, -1, 3)
loss_3d = torch.pow(diff_3d.view(batch_size * seq_size, -1), 2).sum(1).mean()
#diff_3d = diff_3d * (pred_max - pred_min).repeat(1, 1, 21, 1).view(batch_size * seq_size, -1, 3)
loss_3d = rampup_scale * self.alpha_3d * loss_3d
theta_prev = torch.cat((theta[:, 0, :].unsqueeze(1), theta[:, :-1, :]), 1)
beta_prev = torch.cat((beta[:, 0, :].unsqueeze(1), beta[:, :-1, :]), 1)
pose_temp_loss = torch.pow(theta_prev.view(batch_size * seq_size, -1) - theta.view(batch_size * seq_size, -1),
2).sum(1).mean()
shape_temp_loss = torch.pow(beta_prev.view(batch_size * seq_size, -1) - beta.view(batch_size * seq_size, -1),
2).sum(1).mean()
# print('theta_temp loss', pose_temp_loss, 'beta_temp_loss', shape_temp_loss)
if 'ConvLSTM' in self.args.model_name :
loss_temp = 0.*( 0.05 * pose_temp_loss + 1. * shape_temp_loss)
# loss_temp *= 0.
else : loss_temp = torch.zeros(1).cuda()
# Calculate the Losses - Hand mask loss
loss_mask = self.getHandMask(y_hat, mask)
loss_mask = rampup_scale * self.alpha_mask * loss_mask
# Calculate the Losses - Regularization loss
loss_reg = torch.zeros(1).cuda()
# Calculate the Losses - Camera Parameter Loss
if camera_param_target.sum().abs().item() > 0:
if dataset_type[0] == 7 :
loss_camera = torch.nn.functional.mse_loss(
camera_param_pred[:, :, 16:26].view(batch_size * seq_size, -1),
camera_param_target[:, :, 16:26].view(batch_size * seq_size, -1))
else :
loss_camera_scale = torch.nn.functional.mse_loss(camera_param_pred[:, :, 0:1].view(batch_size*seq_size, -1), camera_param_target[:, :, 0:1].view(batch_size*seq_size, -1))
loss_camera_trans = torch.nn.functional.mse_loss(camera_param_pred[:, :, 1:3].view(batch_size*seq_size, -1), camera_param_target[:, :, 1:3].view(batch_size*seq_size, -1))
loss_camera_rot = torch.nn.functional.mse_loss(camera_param_pred[:, :, 3:6].view(batch_size*seq_size, -1), camera_param_target[:, :, 3:6].view(batch_size*seq_size, -1))
loss_camera_theta = torch.nn.functional.mse_loss(camera_param_pred[:, :, 6:16].view(batch_size*seq_size, -1), camera_param_target[:, :, 6:16].view(batch_size*seq_size, -1))
loss_camera_beta = torch.nn.functional.mse_loss(camera_param_pred[:, :, 16:26].view(batch_size*seq_size, -1), camera_param_target[:, :, 16:26].view(batch_size*seq_size, -1))
loss_camera = loss_camera_scale + loss_camera_trans + loss_camera_rot + loss_camera_theta + loss_camera_beta
theta_dif = np.sum(np.square((camera_param_pred[:, :, 6:16].view(batch_size, seq_size, -1) -
camera_param_target[:, :, 6:16].view(batch_size, seq_size, -1) ).detach().cpu().numpy()), axis=0)
beta_dif = np.sum(np.square((camera_param_pred[:, :, 16:26].view(batch_size, seq_size, -1) -
camera_param_target[:, :, 16:26].view(batch_size, seq_size, -1) ).detach().cpu().numpy()), axis=0)
print(theta_dif, beta_dif)
loss_camera = self.alpha_camera * loss_camera
else:
loss_camera = torch.zeros(1).cuda()
# Weighted sum
loss = loss_2d + loss_3d + loss_mask + loss_reg + loss_camera + loss_temp
# Initialize Average Distance Storage
avg_distance_2d = list()
avg_distance_3d = list()
for _ in range(self.args.n_kps):
avg_distance_2d.append(None)
avg_distance_3d.append(None)
# Calculate euclidean distance
diff_2d = joint_2d_pred.view(batch_size*seq_size, -1, 2) - joint_2d_target.view(batch_size*seq_size, -1, 2)
euclidean_dist_2d = np.sqrt(np.sum(np.square(diff_2d.detach().cpu().numpy()), axis=2))
euclidean_dist_3d = np.sqrt(np.sum(np.square(diff_3d.detach().cpu().numpy()), axis=2))
for i in range(self.args.n_kps):
avg_distance_2d[i] = euclidean_dist_2d[:, i]
avg_distance_3d[i] = euclidean_dist_3d[:, i]
return loss, [loss_2d.item(), loss_3d.item(), loss_mask.item(), loss_reg.item(), loss_camera.item(), avg_distance_2d, avg_distance_3d]
def getHandMask(self, y_hat, mask):
batch_size, seq_size, _, h, w = mask.size()
loss_mask = torch.ones(batch_size, seq_size, 1).cuda()
y_hat = y_hat.round().long()
y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] >= w, torch.tensor(w-1, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] >= h, torch.tensor(h-1, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
for i_batch in range(batch_size):
for i_seq in range(seq_size):
loss_mask[i_batch, i_seq] = loss_mask[i_batch, i_seq] - mask[i_batch, i_seq, 0, y_hat[i_batch, i_seq, :, 1], y_hat[i_batch, i_seq, :, 0]].sum()/self.n_meshes
return loss_mask.mean()
def normalize_joints_scale(self, hand_joints):
min_joints, _ = torch.min(hand_joints, dim=2, keepdim=True)
max_joints, _ = torch.max(hand_joints, dim=2, keepdim=True)
hand_joints[:, :, :, 0] = (hand_joints[:, :, :, 0] - min_joints[:, :, :, 0]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
hand_joints[:, :, :, 1] = (hand_joints[:, :, :, 1] - min_joints[:, :, :, 1]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
hand_joints[:, :, :, 2] = (hand_joints[:, :, :, 2] - min_joints[:, :, :, 2]) / (max_joints[:, :, :, 0] - min_joints[:, :, :, 0])
return hand_joints, min_joints, max_joints
def center_joints_scale(self, hand_joints, max_joints):
hand_joints[:, :, :, 0] = hand_joints[:, :, :, 0] - max_joints[:, :, :, 0]
hand_joints[:, :, :, 1] = hand_joints[:, :, :, 1] - max_joints[:, :, :, 1]
hand_joints[:, :, :, 2] = hand_joints[:, :, :, 2] - max_joints[:, :, :, 2]
return hand_joints
def draw_3d_mano_pose(self, pose_3d, pose_3d2, color='black', color2='red'):
    pose_3d = pose_3d.reshape(21, 3)
    pose_3d2 = pose_3d2.reshape(21, 3)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Kinematic chain of the 21-joint hand: wrist (0) to each finger base,
    # then along each finger to the tip.
    bones = [(0, 1), (0, 5), (0, 9), (0, 13), (0, 17),
             (1, 2), (2, 3), (3, 4),
             (5, 6), (6, 7), (7, 8),
             (9, 10), (10, 11), (11, 12),
             (13, 14), (14, 15), (15, 16),
             (17, 18), (18, 19), (19, 20)]
    # Draw the second pose (e.g. the target) first, then the first pose,
    # matching the original plotting order and colors.
    for joints, c in ((pose_3d2, color2), (pose_3d, color)):
        for i, j in bones:
            ax.plot([joints[i][0], joints[j][0]],
                    [joints[i][1], joints[j][1]],
                    zs=[joints[i][2], joints[j][2]],
                    linewidth=3, color=c)
    plt.show()
    return ax
def getHandMask(y_hat, mask):
batch_size, seq_size, _, h, w = mask.size()
loss_mask = torch.ones(batch_size, seq_size, 1).cuda()
y_hat = y_hat.round().long()
y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] >= w, torch.tensor(w-1, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] >= h, torch.tensor(h-1, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
for i_batch in range(batch_size):
for i_seq in range(seq_size):
loss_mask[i_batch, i_seq] = loss_mask[i_batch, i_seq] - mask[i_batch, i_seq, 0, y_hat[i_batch, i_seq, :, 1], y_hat[i_batch, i_seq, :, 0]].sum()/778
return loss_mask.mean()
class STBLoss(nn.Module):
def __init__(self, args, pretrain=False):
super(STBLoss, self).__init__()
# Initialize Parameters
self.args = args
self.pretrain = pretrain
self.alpha_2d = 1
self.alpha_3d = 10 #100
self.alpha_mask = 5 # 100
self.alpha_reg = 0#10
self.alpha_beta = 0#10000
self.alpha_camera = 1
self.n_meshes = 778
self.img_size = 224
def getRampUpScale(self, epoch):
if self.pretrain:
return torch.ones(1).cuda()
else:
return torch.ones(1).cuda()
# return torch.FloatTensor([(epoch+1) / self.args.max_epochs_ramp_up]).cuda()
def forward(self, epoch, mask, predictions, targets):
# Initialize predictions
x2d_pred, x3d_pred, camera_param_pred, theta, beta = predictions
# Initialize targets
joint_2d_target, joint_3d_target, verts_3d_target, camera_param_target, dataset_type = targets
# Initialize Variables
batch_size, seq_size, _ = x2d_pred.size()
# Get Vectors
joint_2d_pred = torch.stack((x2d_pred[:, :, :42:2], x2d_pred[:, :, 1:42:2]), dim=3) # x_hat
y_hat = x2d_pred[:, :, 42:].view(batch_size, seq_size, 778, 2)
joint_3d_pred = torch.stack((x3d_pred[:, :, :63:3], x3d_pred[:, :, 1:63:3], x3d_pred[:, :, 2:63:3]), dim=3)
verts_3d_pred = torch.stack((x3d_pred[:, :, 63::3], x3d_pred[:, :, 64::3], x3d_pred[:, :, 65::3]), dim=3)
# Calculate the Losses - 2D joint re-projection loss
loss_2d = torch.abs((joint_2d_pred.view(batch_size*seq_size, -1) / self.img_size - joint_2d_target.view(batch_size*seq_size, -1) / self.img_size)).sum(1).mean()
loss_2d = self.alpha_2d * loss_2d
# Calculate the Losses - Temporal loss
loss_temp = torch.zeros(1).cuda()
# theta_prev = torch.cat((theta[:, 0, :].unsqueeze(1), theta[:, :-1, :]), 1)
# beta_prev = torch.cat((beta[:, 0, :].unsqueeze(1), beta[:, :-1:]), 1)
# pose_temp_loss = torch.pow(theta_prev.view(batch_size * seq_size, -1) - theta.view(batch_size * seq_size, -1),
# 2).sum(1).mean()
# shape_temp_loss = torch.pow(beta_prev.view(batch_size * seq_size, -1) - beta.view(batch_size * seq_size, -1),
# 2).sum(1).mean()
# loss_temp = 0.1 * pose_temp_loss + 1. * shape_temp_loss
# Calculate the Losses - Hand mask loss
loss_mask = self.alpha_mask * getHandMask(y_hat, mask)
# Calculate the Losses - Camera loss
loss_camera = torch.zeros(1).cuda()
# Calculate the Losses - Regularization loss
loss_reg = torch.zeros(1).cuda()
# Calculate the Losses - 3D joint loss (Only the STEREO dataset)
# diff_3d = joint_3d_pred.view(batch_size * seq_size, -1, 3) - joint_3d_target.view(batch_size * seq_size, -1, 3)
## normalize
joint_3d_pred, pred_min, pred_max = self.normalize_joints_scale(joint_3d_pred)
joint_3d_target, targ_min, targ_max = self.normalize_joints_scale(joint_3d_target)
verts_3d_pred, _, pred_max_v = self.normalize_joints_scale(verts_3d_pred)
_, _, maxp = self.normalize_joints_scale(joint_3d_pred)
_, _, maxt = self.normalize_joints_scale(joint_3d_target)
_, _, maxv = self.normalize_joints_scale(verts_3d_pred)
# _, pred_mean, pred_std = self.normalize_joints(norm_scaled_joint3d_pred)
# # verts_3d_pred, _, _ = self.normalize_joints(verts_3d_pred)
# joint_3d_target, _, _ = self.normalize_joints(joint_3d_target)
# print(pred_max.size(), pred_mean.size())
# joint_3d_target = self.denormalize_joints_scale(joint_3d_target, pred_min, pred_max)
joint_3d_pred = self.center_joints_scale(joint_3d_pred, maxp)
joint_3d_target = self.center_joints_scale(joint_3d_target, maxt)
verts_3d_pred = self.center_joints_scale(verts_3d_pred, maxv)
# ax = self.draw_3d_mano_pose(joint_3d_pred[0][0], joint_3d_target[0][0], 'black', 'red')
# ax.scatter(xs=verts_3d_pred[0,0,:, 0].cpu().numpy(), ys=verts_3d_pred[0,0,:, 1].cpu().numpy(), zs=verts_3d_pred[0, 0, :, 2].cpu().numpy())
# plt.show()
palm_cent_pred = 0.5 * verts_3d_pred[:, :, 17, :] + 0.5 * verts_3d_pred[:, :, 67, :]
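# The palm center is approximated as the midpoint of mesh vertices 17 and 67;
# these indices are presumably opposing points on the MANO palm surface, but
# the choice is undocumented in the source.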
palm_cent_targ = joint_3d_target[:, :, 0, :]
diff_3d_pc = palm_cent_pred.view(batch_size * seq_size, 3) - palm_cent_targ.view(batch_size * seq_size, 3)
diff_3d_ot = joint_3d_pred[:, :, 1:, :].view(batch_size * seq_size, -1, 3) - joint_3d_target[:, :, 1:, :].view(batch_size * seq_size, -1, 3)
diff_3d = torch.cat((diff_3d_pc.unsqueeze(1), diff_3d_ot), 1)
loss_3d = self.alpha_3d * torch.pow(diff_3d.view(batch_size * seq_size, -1), 2).sum(1).mean()
diff_3d = diff_3d * (pred_max - pred_min).repeat(1,1,21,1).view(batch_size * seq_size, -1, 3)
# Weighted sum
loss = loss_2d + loss_3d + loss_mask + loss_reg + loss_camera + loss_temp
# Initialize Average Distance Storage
avg_distance_2d = list()
avg_distance_3d = list()
for _ in range(self.args.n_kps):
avg_distance_2d.append(None)
avg_distance_3d.append(None)
# Calculate euclidean distance
diff_2d = joint_2d_pred.view(batch_size*seq_size, -1, 2) - joint_2d_target.view(batch_size*seq_size, -1, 2)
euclidean_dist_2d = np.sqrt(np.sum(np.square(diff_2d.detach().cpu().numpy()), axis=2))
euclidean_dist_3d = np.sqrt(np.sum(np.square(diff_3d.detach().cpu().numpy()), axis=2))
for i in range(self.args.n_kps):
avg_distance_2d[i] = euclidean_dist_2d[:, i]
avg_distance_3d[i] = euclidean_dist_3d[:, i]
return loss, [loss_2d.item(), loss_3d.item(), loss_mask.item(), loss_reg.item(), loss_camera.item(), avg_distance_2d, avg_distance_3d]
    def normalize_joints_scale(self, hand_joints):
        # Min/max over the joint dimension (dim=2), one value per axis.
        min_joints, _ = torch.min(hand_joints, dim=2, keepdim=True)
        max_joints, _ = torch.max(hand_joints, dim=2, keepdim=True)
        # Every axis is shifted by its own minimum but divided by the *x*-extent,
        # i.e. the hand is scaled uniformly so its aspect ratio is preserved.
        # The operation is in place; the pre-normalization min/max are returned.
        scale = max_joints[:, :, :, 0] - min_joints[:, :, :, 0]
        hand_joints[:, :, :, 0] = (hand_joints[:, :, :, 0] - min_joints[:, :, :, 0]) / scale
        hand_joints[:, :, :, 1] = (hand_joints[:, :, :, 1] - min_joints[:, :, :, 1]) / scale
        hand_joints[:, :, :, 2] = (hand_joints[:, :, :, 2] - min_joints[:, :, :, 2]) / scale
        return hand_joints, min_joints, max_joints
    def center_joints_scale(self, hand_joints, max_joints):
        # Translate (in place) so the per-axis maximum sits at the origin.
        hand_joints[:, :, :, 0] = hand_joints[:, :, :, 0] - max_joints[:, :, :, 0]
        hand_joints[:, :, :, 1] = hand_joints[:, :, :, 1] - max_joints[:, :, :, 1]
        hand_joints[:, :, :, 2] = hand_joints[:, :, :, 2] - max_joints[:, :, :, 2]
        return hand_joints
    def draw_3d_mano_pose(self, pose_3d, pose_3d2, color='black', color2='red'):
        pose_3d = pose_3d.reshape(21, 3)
        pose_3d2 = pose_3d2.reshape(21, 3)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Kinematic chain of the 21-joint hand skeleton: wrist (0) to each
        # finger base, then along each finger to the tip.
        bones = [(0, 1), (0, 5), (0, 9), (0, 13), (0, 17),
                 (1, 2), (2, 3), (3, 4),
                 (5, 6), (6, 7), (7, 8),
                 (9, 10), (10, 11), (11, 12),
                 (13, 14), (14, 15), (15, 16),
                 (17, 18), (18, 19), (19, 20)]
        # Draw the second pose first (e.g. the target in color2), then the
        # first pose (e.g. the prediction in color), as the unrolled plot
        # calls originally did.
        for pose, c in ((pose_3d2, color2), (pose_3d, color)):
            for i, j in bones:
                ax.plot([pose[i][0], pose[j][0]], [pose[i][1], pose[j][1]],
                        zs=[pose[i][2], pose[j][2]], linewidth=3, color=c)
        return ax
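# Loss for the real-world dataset: only the 2D re-projection and the
# scale-normalized 3D joint terms are active under the default weights.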
class RWorldLoss(nn.Module):
def __init__(self, args, pretrain=False):
super(RWorldLoss, self).__init__()
# Initialize Parameters
self.args = args
self.pretrain = pretrain
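        # Loss weights: pretraining supervises only the camera parameters;
        # full training activates the 2D (x5) and 3D (x100) joint terms.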
if self.pretrain:
self.alpha_2d = 0
self.alpha_3d = 0
self.alpha_mask = 0
self.alpha_reg = 0
self.alpha_beta = 0
self.alpha_camera = 1
else:
self.alpha_2d = 5
self.alpha_3d = 100
self.alpha_mask = 0
self.alpha_reg = 0
self.alpha_beta = 0
self.alpha_camera = 0
self.n_meshes = 778
self.img_size = 224
    def getRampUpScale(self, epoch):
        # Ramp-up is currently disabled; both modes use a constant scale of 1.
        # Original schedule: (epoch + 1) / self.args.max_epochs_ramp_up
        return torch.ones(1).cuda()
def forward(self, epoch, mask, predictions, targets):
# Initialize predictions
x2d_pred, x3d_pred, camera_param_pred, theta, beta = predictions
# Initialize targets
joint_2d_target, joint_3d_target, verts_3d_target, camera_param_target, dataset_type = targets
batch_size, seq_size, _ = x2d_pred.size()
# Get Vectors
joint_2d_pred = torch.stack((x2d_pred[:, :, :42:2], x2d_pred[:, :, 1:42:2]), dim=3) # x_hat
y_hat = x2d_pred[:, :, 42:].view(batch_size, seq_size, 778, 2)
        # Only the 21 3D joints carry a loss here; no camera-parameter or
        # 3D-vertex loss is used for this dataset.
        joint_3d_pred = torch.stack((x3d_pred[:, :, :63:3], x3d_pred[:, :, 1:63:3], x3d_pred[:, :, 2:63:3]), dim=3)
        # Normalize to a common scale, then centre on the per-axis maxima
        # (re-normalizing a second time, as the original did, is a numerical no-op).
        joint_3d_pred, pred_min, pred_max = self.normalize_joints_scale(joint_3d_pred)
        joint_3d_target, targ_min, targ_max = self.normalize_joints_scale(joint_3d_target)
        maxp, _ = torch.max(joint_3d_pred, dim=2, keepdim=True)
        maxt, _ = torch.max(joint_3d_target, dim=2, keepdim=True)
        joint_3d_pred = self.center_joints_scale(joint_3d_pred, maxp)
        joint_3d_target = self.center_joints_scale(joint_3d_target, maxt)
diff_3d = joint_3d_pred.view(batch_size * seq_size, -1, 3) - joint_3d_target.view(batch_size * seq_size, -1, 3)
loss_3d = self.alpha_3d * torch.pow(diff_3d.view(batch_size * seq_size, -1), 2).sum(1).mean()
        # Rescale differences back to pre-normalization units for the reported distances.
        diff_3d = diff_3d * (pred_max - pred_min).repeat(1, 1, 21, 1).view(batch_size * seq_size, -1, 3)
        # 2D joint re-projection loss on image-normalized coordinates
        loss_2d = torch.abs((joint_2d_pred.view(batch_size * seq_size, -1) / self.img_size - joint_2d_target.view(
            batch_size * seq_size, -1) / self.img_size)).sum(1).mean()
        loss_2d = self.alpha_2d * loss_2d
        loss_temp = torch.zeros(1).cuda()
        loss_mask = self.alpha_mask * self.getHandMask(y_hat, mask)
loss_camera = torch.zeros(1).cuda()
loss_reg = torch.zeros(1).cuda()
        # Weighted sum
        loss = loss_2d + loss_3d + loss_mask + loss_reg + loss_camera + loss_temp
# Initialize Average Distance Storage
avg_distance_2d = list()
avg_distance_3d = list()
for _ in range(self.args.n_kps):
avg_distance_2d.append(None)
avg_distance_3d.append(None)
# Calculate euclidean distance
diff_2d = joint_2d_pred.view(batch_size * seq_size, -1, 2) - joint_2d_target.view(batch_size * seq_size, -1, 2)
euclidean_dist_2d = np.sqrt(np.sum(np.square(diff_2d.detach().cpu().numpy()), axis=2))
euclidean_dist_3d = np.sqrt(np.sum(np.square(diff_3d.detach().cpu().numpy()), axis=2))
for i in range(self.args.n_kps):
avg_distance_2d[i] = euclidean_dist_2d[:, i]
avg_distance_3d[i] = euclidean_dist_3d[:, i]
return loss, [loss_2d.item(), loss_3d.item(), loss_mask.item(), loss_reg.item(), loss_camera.item(),
avg_distance_2d, avg_distance_3d]
    def getHandMask(self, y_hat, mask):
        # Silhouette consistency: every projected mesh vertex should land inside
        # the ground-truth hand mask; the loss is the fraction of vertices that miss it.
        batch_size, seq_size, _, h, w = mask.size()
        loss_mask = torch.ones(batch_size, seq_size, 1).cuda()
        # Round vertex coordinates to pixel indices and clamp them to the image.
        y_hat = y_hat.round().long()
        y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] >= w, torch.tensor(w - 1, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
        y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] >= h, torch.tensor(h - 1, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
        y_hat[:, :, :, 0] = torch.where(y_hat[:, :, :, 0] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 0])
        y_hat[:, :, :, 1] = torch.where(y_hat[:, :, :, 1] < 0, torch.tensor(0, dtype=torch.long).cuda(), y_hat[:, :, :, 1])
        for i_batch in range(batch_size):
            for i_seq in range(seq_size):
                loss_mask[i_batch, i_seq] = loss_mask[i_batch, i_seq] - mask[i_batch, i_seq, 0, y_hat[i_batch, i_seq, :, 1], y_hat[i_batch, i_seq, :, 0]].sum() / self.n_meshes
        return loss_mask.mean()
    def normalize_joints_scale(self, hand_joints):
        # Same in-place, aspect-ratio-preserving normalization as in the loss
        # above: per-axis minimum removed, everything divided by the x-extent.
        min_joints, _ = torch.min(hand_joints, dim=2, keepdim=True)
        max_joints, _ = torch.max(hand_joints, dim=2, keepdim=True)
        scale = max_joints[:, :, :, 0] - min_joints[:, :, :, 0]
        hand_joints[:, :, :, 0] = (hand_joints[:, :, :, 0] - min_joints[:, :, :, 0]) / scale
        hand_joints[:, :, :, 1] = (hand_joints[:, :, :, 1] - min_joints[:, :, :, 1]) / scale
        hand_joints[:, :, :, 2] = (hand_joints[:, :, :, 2] - min_joints[:, :, :, 2]) / scale
        return hand_joints, min_joints, max_joints
    def center_joints_scale(self, hand_joints, max_joints):
        # Translate (in place) so the per-axis maximum sits at the origin.
        hand_joints[:, :, :, 0] = hand_joints[:, :, :, 0] - max_joints[:, :, :, 0]
        hand_joints[:, :, :, 1] = hand_joints[:, :, :, 1] - max_joints[:, :, :, 1]
        hand_joints[:, :, :, 2] = hand_joints[:, :, :, 2] - max_joints[:, :, :, 2]
        return hand_joints
    def draw_3d_mano_pose(self, pose_3d, pose_3d2, color='black', color2='red'):
        pose_3d = pose_3d.reshape(21, 3)
        pose_3d2 = pose_3d2.reshape(21, 3)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Kinematic chain of the 21-joint hand skeleton: wrist (0) to each
        # finger base, then along each finger to the tip.
        bones = [(0, 1), (0, 5), (0, 9), (0, 13), (0, 17),
                 (1, 2), (2, 3), (3, 4),
                 (5, 6), (6, 7), (7, 8),
                 (9, 10), (10, 11), (11, 12),
                 (13, 14), (14, 15), (15, 16),
                 (17, 18), (18, 19), (19, 20)]
        # Second pose (target, color2) is drawn first, then the first pose
        # (prediction, color), matching the order of the unrolled original.
        for pose, c in ((pose_3d2, color2), (pose_3d, color)):
            for i, j in bones:
                ax.plot([pose[i][0], pose[j][0]], [pose[i][1], pose[j][1]],
                        zs=[pose[i][2], pose[j][2]], linewidth=3, color=c)
        return ax
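# Loss for the EgoDexter dataset, which annotates only the five fingertips;
# supervision is applied to the last frame of each input sequence.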
class EgoDexLoss(nn.Module):
def __init__(self, args, pretrain=False):
super(EgoDexLoss, self).__init__()
# Initialize Parameters
self.args = args
self.pretrain = pretrain
        self.alpha_2d = 5.
        self.alpha_3d = 100
        self.alpha_mask = 0.  # 100
        self.alpha_reg = 0  # 10
        self.alpha_beta = 0  # 10000
        self.alpha_camera = 1
        self.n_meshes = 778
        self.img_size = 224
    def getRampUpScale(self, epoch):
        # Ramp-up is currently disabled; both modes use a constant scale of 1.
        # Original schedule: (epoch + 1) / self.args.max_epochs_ramp_up
        return torch.ones(1).cuda()
def forward(self, epoch, mask, predictions, targets):
# Initialize predictions
x2d_pred, x3d_pred, camera_param_pred, theta, beta = predictions
# Initialize targets
joint_2d_target, joint_3d_target, verts_3d_target, camera_param_target, dataset_type = targets
# Initialize Variables
batch_size, seq_size, _ = x2d_pred.size()
# Get Vectors
joint_2d_pred = torch.stack((x2d_pred[:, :, :42:2], x2d_pred[:, :, 1:42:2]), dim=3) # x_hat
y_hat = x2d_pred[:, :, 42:].view(batch_size, seq_size, 778, 2)
joint_3d_pred = torch.stack((x3d_pred[:, :, :63:3], x3d_pred[:, :, 1:63:3], x3d_pred[:, :, 2:63:3]), dim=3)
verts_3d_pred = torch.stack((x3d_pred[:, :, 63::3], x3d_pred[:, :, 64::3], x3d_pred[:, :, 65::3]), dim=3)
        # Calculate the Losses - 2D joint re-projection loss. EgoDexter only
        # annotates the five fingertips (joint indices 4, 8, 12, 16, 20), and
        # only the last frame of the sequence is supervised.
        loss_2d = torch.abs((joint_2d_pred[:, -1, [4, 8, 12, 16, 20], :].view(batch_size, -1) / self.img_size
                             - joint_2d_target[:, -1, :, :].view(batch_size, -1) / self.img_size)).sum(1).mean()
        loss_2d = self.alpha_2d * loss_2d
# Calculate the Losses - Temporal loss
loss_temp = torch.zeros(1).cuda()
# Calculate the Losses - Hand mask loss
loss_mask = torch.zeros(1).cuda()
# Calculate the Losses - Camera loss
loss_camera = torch.zeros(1).cuda()
# Calculate the Losses - Regularization loss
loss_reg = torch.zeros(1).cuda()
        # Calculate the Losses - 3D joint loss (fingertips of the last frame only)
        diff_3d = torch.zeros(batch_size, 1, 5, 3).cuda()
        joint3d_pred = joint_3d_pred[:, -1, [4, 8, 12, 16, 20], :]  # last frame
        joint3d_targ = joint_3d_target[:, -1, :, :]
        # Loop over the batch: each sample can have a different set of visible joints.
        for b in range(batch_size):
            targs3d = joint3d_targ[b, :]  # (5, 3)
            preds3d = joint3d_pred[b, :]
            # Joints annotated as all-zero are invisible; zero the predictions
            # too so they contribute nothing to the loss.
            preds3d[joint3d_targ[b, :] == 0.] = 0.
            visible_indice = (joint3d_targ[b, :] != 0.).nonzero()
            visible_indice_mask = joint3d_targ[b, :] != 0.
            # Normalize and centre per sample; clone() avoids mutating the
            # originals, since normalize_joints_scale works in place.
            preds3d, pred_min, pred_max = self.normalize_joints_scale(preds3d.clone())
            targs3d, targ_min, targ_max = self.normalize_joints_scale(targs3d.clone())
            maxp, _ = torch.max(preds3d.clone(), dim=0, keepdim=True)
            maxt, _ = torch.max(targs3d.clone(), dim=0, keepdim=True)
            preds3d = self.center_joints_scale(preds3d.clone(), maxp)
            targs3d = self.center_joints_scale(targs3d.clone(), maxt)
            # Keep only the joints with valid (non-zero) annotations.
            n_visible = torch.unique(visible_indice[:, 0]).size()[0]
            targs3d = targs3d[visible_indice_mask].view(n_visible, 3)  # joint, coord
            preds3d = preds3d[visible_indice_mask].view(n_visible, 3)  # joint, coord
            # Scale the differences back to pre-normalization units before accumulating.
            diff_3d_ego = (targs3d - preds3d) * (pred_max - pred_min).repeat(n_visible, 1)
            diff_3d[b, 0, visible_indice_mask] = diff_3d_ego.view(n_visible * 3)
loss_3d = self.alpha_3d * torch.pow(diff_3d.view(batch_size, -1), 2).sum(1).mean().cuda()
# Weighted sum
loss = loss_2d + loss_3d + loss_mask + loss_reg + loss_camera + loss_temp
# Initialize Average Distance Storage
avg_distance_2d = list()
avg_distance_3d = list()
for _ in range(5):
avg_distance_2d.append(None)
avg_distance_3d.append(None)
# Calculate euclidean distance
diff_2d = joint_2d_pred[:, -1, [4, 8, 12, 16, 20], :].view(batch_size, -1, 2) - joint_2d_target[:, -1, :, :].view(batch_size, -1, 2)
euclidean_dist_2d = np.sqrt(np.sum(np.square(diff_2d.detach().cpu().numpy()), axis=2))
        # The 0.7 factor is an empirical, dataset-specific rescaling of the 3D distances.
        euclidean_dist_3d = np.sqrt(np.sum(np.square(diff_3d.squeeze(1).detach().cpu().numpy()), axis=2)) * 0.7
for i in range(5):
avg_distance_2d[i] = euclidean_dist_2d[:, i]
avg_distance_3d[i] = euclidean_dist_3d[:, i]
return loss, [loss_2d.item(), loss_3d.item(), loss_mask.item(), loss_reg.item(), loss_camera.item(), avg_distance_2d, avg_distance_3d]
    def normalize_joints_scale(self, hand_joints):
        # Single-hand variant of the method used in the classes above: min/max
        # over the joint dimension, all axes divided by the x-extent
        # (uniform scaling, in place).
        min_joints, _ = torch.min(hand_joints, dim=0, keepdim=True)
        max_joints, _ = torch.max(hand_joints, dim=0, keepdim=True)
        scale = max_joints[:, 0] - min_joints[:, 0]
        hand_joints[:, 0] = (hand_joints[:, 0] - min_joints[:, 0]) / scale
        hand_joints[:, 1] = (hand_joints[:, 1] - min_joints[:, 1]) / scale
        hand_joints[:, 2] = (hand_joints[:, 2] - min_joints[:, 2]) / scale
        return hand_joints, min_joints, max_joints
    def center_joints_scale(self, hand_joints, max_joints):
        # Translate (in place) so the per-axis maximum sits at the origin.
        hand_joints[:, 0] = hand_joints[:, 0] - max_joints[:, 0]
        hand_joints[:, 1] = hand_joints[:, 1] - max_joints[:, 1]
        hand_joints[:, 2] = hand_joints[:, 2] - max_joints[:, 2]
        return hand_joints
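# Loss for the Dexter+Object dataset: fingertip-only annotations like EgoDexter,
# but with a depth value of 32001 marking occluded / invalid fingertips.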
class DexterObjLoss(nn.Module):
def __init__(self, args, pretrain=False):
super(DexterObjLoss, self).__init__()
# Initialize Parameters
self.args = args
self.pretrain = pretrain
        self.alpha_2d = 1
        self.alpha_3d = 100
        self.alpha_mask = 0.  # 100
        self.alpha_reg = 0  # 10
        self.alpha_beta = 0  # 10000
        self.alpha_camera = 1
        self.n_meshes = 778
        self.img_size = 224
    def getRampUpScale(self, epoch):
        # Ramp-up is currently disabled; both modes use a constant scale of 1.
        # Original schedule: (epoch + 1) / self.args.max_epochs_ramp_up
        return torch.ones(1).cuda()
def forward(self, epoch, mask, predictions, targets):
# Initialize predictions
x2d_pred, x3d_pred, camera_param_pred, theta, beta = predictions
# Initialize targets
joint_2d_target, joint_3d_target, verts_3d_target, camera_param_target, dataset_type = targets
# Initialize Variables
batch_size, seq_size, _ = x2d_pred.size()
# Get Vectors
joint_2d_pred = torch.stack((x2d_pred[:, :, :42:2], x2d_pred[:, :, 1:42:2]), dim=3) # x_hat
y_hat = x2d_pred[:, :, 42:].view(batch_size, seq_size, 778, 2)
joint_3d_pred = torch.stack((x3d_pred[:, :, :63:3], x3d_pred[:, :, 1:63:3], x3d_pred[:, :, 2:63:3]), dim=3)
verts_3d_pred = torch.stack((x3d_pred[:, :, 63::3], x3d_pred[:, :, 64::3], x3d_pred[:, :, 65::3]), dim=3)
        # Calculate the Losses - 2D joint re-projection loss. Dexter+Object only
        # annotates the five fingertips (joint indices 4, 8, 12, 16, 20) of the
        # last frame of the sequence.
        loss_2d = torch.abs((joint_2d_pred[:, -1, [4, 8, 12, 16, 20], :].view(batch_size, -1) / self.img_size
                             - joint_2d_target[:, -1, :, :].view(batch_size, -1) / self.img_size)).sum(1).mean()
        loss_2d = self.alpha_2d * loss_2d
# Calculate the Losses - Temporal loss
loss_temp = torch.zeros(1).cuda()
# Calculate the Losses - Hand mask loss
loss_mask = torch.zeros(1).cuda()
# Calculate the Losses - Camera loss
loss_camera = torch.zeros(1).cuda()
# Calculate the Losses - Regularization loss
loss_reg = torch.zeros(1).cuda()
        # Calculate the Losses - 3D joint loss (fingertips of the last frame only)
        diff_3d = torch.zeros(batch_size, 1, 5, 3).cuda()
        joint3d_pred = joint_3d_pred[:, -1, [4, 8, 12, 16, 20], :]  # last frame
        joint3d_targ = joint_3d_target[:, -1, :, :]
        # Loop over the batch: each sample can have a different set of visible fingertips.
        for b in range(batch_size):
            targs3d = joint3d_targ[b, :]  # (5, 3)
            preds3d = joint3d_pred[b, :]
            # A depth of 32001 is the sentinel marking an invisible fingertip;
            # zero those joints out so they do not contribute to the loss.
            targs3d[joint3d_targ[b, :, 2] == 32001, :] = 0.
            preds3d[joint3d_targ[b, :, 2] == 32001, :] = 0.
            visible_indice = (joint3d_targ[b, :, 2] != 32001).nonzero()
            visible_indice_mask = joint3d_targ[b, :, 2] != 32001
            # Normalize and centre per sample; clone() avoids mutating the
            # originals, since normalize_joints_scale works in place.
            preds3d, pred_min, pred_max = self.normalize_joints_scale(preds3d.clone())
            targs3d, targ_min, targ_max = self.normalize_joints_scale(targs3d.clone())
            maxp, _ = torch.max(preds3d.clone(), dim=0, keepdim=True)
            maxt, _ = torch.max(targs3d.clone(), dim=0, keepdim=True)
            preds3d = self.center_joints_scale(preds3d.clone(), maxp)
            targs3d = self.center_joints_scale(targs3d.clone(), maxt)
            # Keep only the visible fingertips. Masking must happen exactly once:
            # applying the same 5-element mask a second time to the already
            # filtered tensors would fail whenever fewer than five fingertips
            # are visible.
            n_visible = torch.unique(visible_indice[:, 0]).size()[0]
            targs3d = targs3d[visible_indice_mask, :].view(n_visible, 3)  # joint, coord
            preds3d = preds3d[visible_indice_mask, :].view(n_visible, 3)  # joint, coord
            # Scale the differences back to pre-normalization units before accumulating.
            diff_3d_dex = (targs3d - preds3d) * (pred_max - pred_min).repeat(n_visible, 1)
            diff_3d[b, 0, visible_indice_mask, :] = diff_3d_dex.view(n_visible, 3)
loss_3d = self.alpha_3d * torch.pow(diff_3d.view(batch_size, -1), 2).sum(1).mean().cuda()
# Weighted sum
loss = loss_2d + loss_3d + loss_mask + loss_reg + loss_camera + loss_temp
# Initialize Average Distance Storage
avg_distance_2d = list()
avg_distance_3d = list()
for _ in range(5):
avg_distance_2d.append(None)
avg_distance_3d.append(None)
# Calculate euclidean distance
diff_2d = joint_2d_pred[:, -1, [4, 8, 12, 16, 20], :].view(batch_size, -1, 2)\
- joint_2d_target[:, -1, :, :].view(batch_size, -1, 2)
euclidean_dist_2d = np.sqrt(np.sum(np.square(diff_2d.detach().cpu().numpy()), axis=2))
euclidean_dist_3d = np.sqrt(np.sum(np.square(diff_3d.squeeze(1).detach().cpu().numpy()), axis=2))
for i in range(5):
avg_distance_2d[i] = euclidean_dist_2d[:, i]
avg_distance_3d[i] = euclidean_dist_3d[:, i]
return loss, [loss_2d.item(), loss_3d.item(), loss_mask.item(), loss_reg.item(), loss_camera.item(), avg_distance_2d, avg_distance_3d]
    def normalize_joints_scale(self, hand_joints):
        # Single-hand variant: min/max over the joint dimension, all axes
        # divided by the x-extent (uniform scaling, in place).
        min_joints, _ = torch.min(hand_joints, dim=0, keepdim=True)
        max_joints, _ = torch.max(hand_joints, dim=0, keepdim=True)
        scale = max_joints[:, 0] - min_joints[:, 0]
        hand_joints[:, 0] = (hand_joints[:, 0] - min_joints[:, 0]) / scale
        hand_joints[:, 1] = (hand_joints[:, 1] - min_joints[:, 1]) / scale
        hand_joints[:, 2] = (hand_joints[:, 2] - min_joints[:, 2]) / scale
        return hand_joints, min_joints, max_joints
    def center_joints_scale(self, hand_joints, max_joints):
        # Translate (in place) so the per-axis maximum sits at the origin.
        hand_joints[:, 0] = hand_joints[:, 0] - max_joints[:, 0]
        hand_joints[:, 1] = hand_joints[:, 1] - max_joints[:, 1]
        hand_joints[:, 2] = hand_joints[:, 2] - max_joints[:, 2]
        return hand_joints
[file: tests/chainer_tests/functions_tests/normalization_tests/test_batch_normalization.py | repo: Teppei-Kanayama/myChainer @ 6ffbfd8479768ca8b580c98788c5b1ba1fd3aee8 | blob af58affa0f475b2ef0d48347e97c938d2d4c8569 | license: MIT | 5,414 bytes | Python]
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer.functions.normalization import batch_normalization
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
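# NumPy reference implementation against which the chainer outputs are checked.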
def _batch_normalization(expander, gamma, beta, x, mean, var):
mean = mean[expander]
std = numpy.sqrt(var)[expander]
y_expect = (gamma[expander] * (x - mean) / std + beta[expander])
return y_expect
@testing.parameterize(*testing.product({
'ndim': [0, 1, 2, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestBatchNormalization(unittest.TestCase):
def setUp(self):
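        # `expander` broadcasts the per-channel parameters over the batch and
        # spatial axes; `aggr_axes` are the axes reduced for batch statistics.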
self.expander = (None, Ellipsis) + (None,) * self.ndim
self.aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
self.eps = 1e-5
self.gamma = numpy.random.uniform(.5, 1, (3,)).astype(self.dtype)
self.beta = numpy.random.uniform(-1, 1, (3,)).astype(self.dtype)
shape = (7, 3) + (2,) * self.ndim
self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
self.args = [self.x, self.gamma, self.beta]
self.mean = self.x.mean(axis=self.aggr_axes)
self.var = self.x.var(axis=self.aggr_axes) + self.eps
        self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
        self.check_backward_options = {
            'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            # float16 needs much looser tolerances.
            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
            self.check_backward_options = {
                'eps': 2 ** -3, 'atol': 5e-2, 'rtol': 1e-1}
def check_forward(self, args):
y = functions.batch_normalization(
*[chainer.Variable(i) for i in args], eps=self.eps)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = _batch_normalization(
self.expander, self.gamma, self.beta, self.x, self.mean, self.var)
        gradient_check.assert_allclose(
            y_expect, y.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.args)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(i) for i in self.args])
def check_backward(self, args, y_grad):
        gradient_check.check_backward(
            batch_normalization.BatchNormalizationFunction(eps=self.eps),
            args, y_grad, **self.check_backward_options)
@condition.retry(10)
def test_backward_cpu(self):
self.check_backward(self.args, self.gy)
@attr.gpu
@condition.retry(10)
def test_backward_gpu(self):
self.check_backward(
[cuda.to_gpu(i) for i in self.args], cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
'ndim': [0, 1, 2, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFixedBatchNormalization(unittest.TestCase):
def setUp(self):
self.gamma = numpy.random.uniform(.5, 1, (3,)).astype(self.dtype)
self.beta = numpy.random.uniform(-1, 1, (3,)).astype(self.dtype)
self.expander = (None, Ellipsis) + (None,) * self.ndim
shape = (7, 3) + (2,) * self.ndim
self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
self.eps = 1e-5
self.aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
self.mean = numpy.random.uniform(-1, 1, (3,)).astype(self.dtype)
self.var = numpy.random.uniform(
0.5, 1, (3,)).astype(self.dtype)
self.args = [self.x, self.gamma, self.beta, self.mean, self.var]
        self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
        self.check_backward_options = {
            'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            # float16 needs much looser tolerances.
            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
            self.check_backward_options = {
                'eps': 2 ** -5, 'atol': 1e-2, 'rtol': 1e-1}
def check_forward(self, args):
y = functions.fixed_batch_normalization(
*[chainer.Variable(i) for i in args], eps=self.eps)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = _batch_normalization(
self.expander, self.gamma, self.beta, self.x, self.mean, self.var)
        gradient_check.assert_allclose(
            y_expect, y.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.args)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(i) for i in self.args])
def check_backward(self, args, y_grad):
        gradient_check.check_backward(
            batch_normalization.BatchNormalizationFunction(eps=self.eps),
            args, y_grad, **self.check_backward_options)
@condition.retry(10)
def test_backward_cpu(self):
self.check_backward(self.args, self.gy)
@attr.gpu
@condition.retry(10)
def test_backward_gpu(self):
self.check_backward(
[cuda.to_gpu(i) for i in self.args], cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
[file: v0.9.2/walletrpc/__init__.py | repo: lncm/lnd-proto @ 8caa6558efe043413560f807ef44b11699901d76 | blob af9a58ff491811d18db56a8534c2c7b9bd3586a7 | license: MIT | 63 bytes | Python | 2 stars]
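# Re-export the generated walletkit protobuf and gRPC bindings at package level.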
from .walletkit_pb2 import *
from .walletkit_pb2_grpc import *
[file: tests/grep_test.py | repo: wojdatto/pyzet @ 4d737e7b3bd38879da37ffe6336962fb9f82e611 | blob afc2268887de87d035e6977c31d4865240e57c1a | license: Apache-2.0 | 8,281 bytes | Python | 2 stars, 12 issues]
import pytest
import pyzet.constants as C
from pyzet.main import main
GREP_CMD = ("--config", f"testing/{C.CONFIG_FILE}", "grep")
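# Each test drives the real CLI entry point against the sample zettelkasten
# configured under testing/, capturing stdout/stderr via pytest's capfd fixture.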
def test_grep(capfd):
main([*GREP_CMD, "zet"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
# Another zet test entry
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_ignore_case(capfd):
main([*GREP_CMD, "--ignore-case", "zet"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
# Zet test entry
20211016223643/README.md
# Another zet test entry
20220101220852/README.md
# Zettel with UTF-8
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_title(capfd):
main([*GREP_CMD, "--title", "Hello"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
# Zet test entry
Hello there!
20211016223643/README.md
# Another zet test entry
Hello everyone
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_line_number(capfd):
main([*GREP_CMD, "--line-number", "Hello"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
3:Hello there!
20211016223643/README.md
3:Hello everyone
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_title_and_line_number(capfd):
main([*GREP_CMD, "--title", "--line-number", "Hello"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
1:# Zet test entry
3:Hello there!
20211016223643/README.md
1:# Another zet test entry
3:Hello everyone
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_multiple_matches_in_file(capfd):
main([*GREP_CMD, "test"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
# Zet test entry
#test-tag #another-tag #tag-after-two-spaces
20211016223643/README.md
# Another zet test entry
#test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_multiple_matches_in_file_title(capfd):
# Title matches searched pattern, so --title doesn't make a difference.
main([*GREP_CMD, "--title", "test"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
# Zet test entry
#test-tag #another-tag #tag-after-two-spaces
20211016223643/README.md
# Another zet test entry
#test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_multiple_matches_in_file_line_number(capfd):
main([*GREP_CMD, "--line-number", "test"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
1:# Zet test entry
7: #test-tag #another-tag #tag-after-two-spaces
20211016223643/README.md
1:# Another zet test entry
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_multiple_matches_in_file_title_and_line_number(capfd):
# Title matches searched pattern, so --title doesn't make a difference.
main([*GREP_CMD, "--title", "--line-number", "test"])
out, err = capfd.readouterr()
expected = """\
20211016205158/README.md
1:# Zet test entry
7: #test-tag #another-tag #tag-after-two-spaces
20211016223643/README.md
1:# Another zet test entry
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_two_patterns(capfd):
main([*GREP_CMD, "everyone", "test-tag"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
Hello everyone
#test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_three_patterns(capfd):
main([*GREP_CMD, "everyone", "test-tag", "zet"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
# Another zet test entry
Hello everyone
#test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_three_patterns_verbose(capfd):
main([*GREP_CMD, "everyone", "test-tag", "--", "--or", "-e", "zet"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
# Another zet test entry
Hello everyone
#test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_two_patterns_line_number(capfd):
main([*GREP_CMD, "--line-number", "everyone", "test-tag"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
3:Hello everyone
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_two_patterns_line_number_last(capfd):
main([*GREP_CMD, "everyone", "test-tag", "--line-number"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
3:Hello everyone
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_two_patterns_error_argparse(capfd):
with pytest.raises(SystemExit):
main([*GREP_CMD, "everyone", "--line-number", "test-tag"])
out, err = capfd.readouterr()
assert out == ""
assert err.endswith("pyzet: error: unrecognized arguments: test-tag\n")
def test_grep_two_patterns_error_argparse_weird(capfd):
# I'm not sure why it fails, but it does, so this test confirms it.
with pytest.raises(SystemExit):
main([*GREP_CMD, "everyone", "test-tag", "--line-number", "--", "--no-color"])
out, err = capfd.readouterr()
assert out == ""
assert err.endswith("pyzet: error: unrecognized arguments: -- --no-color\n")
def test_grep_title_with_options_error_weird(capfd):
# I'm not sure why it fails, but it does, so this test confirms it.
#
# The only difference between this one and test_grep_title_with_options()
# is the order of arguments.
with pytest.raises(SystemExit):
main([*GREP_CMD, "everyone", "--title", "--", "--or", "-e", "test-tag"])
out, err = capfd.readouterr()
assert out == ""
assert err.endswith("pyzet: error: unrecognized arguments: -- --or -e test-tag\n")
def test_grep_two_patterns_line_number_verbose(capfd):
main([*GREP_CMD, "everyone", "test-tag", "--", "--line-number"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
3:Hello everyone
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_multiple_patterns_line_number(capfd):
main(
[*GREP_CMD, "everyone", "test-tag", "--", "--line-number", "--or", "-e", "zet"]
)
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
1:# Another zet test entry
3:Hello everyone
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_multiple_patterns_line_number_different_order(capfd):
main(
[*GREP_CMD, "everyone", "test-tag", "--", "--or", "-e", "zet", "--line-number"]
)
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
1:# Another zet test entry
3:Hello everyone
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_with_option_and_pattern(capfd):
# --and means that matching line should always have its pattern
main([*GREP_CMD, "--title", "--ignore-case", "zet", "--", "--and", "-e", "another"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
# Another zet test entry
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_line_number_with_options(capfd):
main([*GREP_CMD, "--line-number", "everyone", "--", "--or", "-e", "test-tag"])
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
3:Hello everyone
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
def test_grep_title_and_line_number_with_options(capfd):
main(
[
*GREP_CMD,
"--title",
"--line-number",
"everyone",
"--",
"--or",
"-e",
"test-tag",
]
)
out, err = capfd.readouterr()
expected = """\
20211016223643/README.md
1:# Another zet test entry
3:Hello everyone
7: #test-tag
"""
assert out.replace("\r", "") == expected
assert err == ""
[file: src/MiniJavaListener.py | repo: simonwangao/MiniJava_Compiler @ 32086af4e984d06301272744e6e755bbaef195bc | blob afc51a596e729b75216dd1523e4f746e733f823e | license: Apache-2.0 | 10,656 bytes | Python | 3 forks]
# Generated from MiniJava.g4 by ANTLR 4.7.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .MiniJavaParser import MiniJavaParser
else:
from MiniJavaParser import MiniJavaParser
# This class defines a complete listener for a parse tree produced by MiniJavaParser.
class MiniJavaListener(ParseTreeListener):
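    # Every callback below defaults to `pass`; a concrete listener overrides
    # only the enter/exit methods it needs while ANTLR's ParseTreeWalker
    # drives the traversal.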
# Enter a parse tree produced by MiniJavaParser#goal.
def enterGoal(self, ctx:MiniJavaParser.GoalContext):
pass
# Exit a parse tree produced by MiniJavaParser#goal.
def exitGoal(self, ctx:MiniJavaParser.GoalContext):
pass
# Enter a parse tree produced by MiniJavaParser#mainclass.
def enterMainclass(self, ctx:MiniJavaParser.MainclassContext):
pass
# Exit a parse tree produced by MiniJavaParser#mainclass.
def exitMainclass(self, ctx:MiniJavaParser.MainclassContext):
pass
# Enter a parse tree produced by MiniJavaParser#dec_class.
def enterDec_class(self, ctx:MiniJavaParser.Dec_classContext):
pass
# Exit a parse tree produced by MiniJavaParser#dec_class.
def exitDec_class(self, ctx:MiniJavaParser.Dec_classContext):
pass
# Enter a parse tree produced by MiniJavaParser#dec_var.
def enterDec_var(self, ctx:MiniJavaParser.Dec_varContext):
pass
# Exit a parse tree produced by MiniJavaParser#dec_var.
def exitDec_var(self, ctx:MiniJavaParser.Dec_varContext):
pass
# Enter a parse tree produced by MiniJavaParser#dec_method.
def enterDec_method(self, ctx:MiniJavaParser.Dec_methodContext):
pass
# Exit a parse tree produced by MiniJavaParser#dec_method.
def exitDec_method(self, ctx:MiniJavaParser.Dec_methodContext):
pass
# Enter a parse tree produced by MiniJavaParser#mtype.
def enterMtype(self, ctx:MiniJavaParser.MtypeContext):
pass
# Exit a parse tree produced by MiniJavaParser#mtype.
def exitMtype(self, ctx:MiniJavaParser.MtypeContext):
pass
# Enter a parse tree produced by MiniJavaParser#state_lrparents.
def enterState_lrparents(self, ctx:MiniJavaParser.State_lrparentsContext):
pass
# Exit a parse tree produced by MiniJavaParser#state_lrparents.
def exitState_lrparents(self, ctx:MiniJavaParser.State_lrparentsContext):
pass
# Enter a parse tree produced by MiniJavaParser#state_if.
def enterState_if(self, ctx:MiniJavaParser.State_ifContext):
pass
# Exit a parse tree produced by MiniJavaParser#state_if.
def exitState_if(self, ctx:MiniJavaParser.State_ifContext):
pass
# Enter a parse tree produced by MiniJavaParser#state_while.
def enterState_while(self, ctx:MiniJavaParser.State_whileContext):
pass
# Exit a parse tree produced by MiniJavaParser#state_while.
def exitState_while(self, ctx:MiniJavaParser.State_whileContext):
pass
# Enter a parse tree produced by MiniJavaParser#state_print.
def enterState_print(self, ctx:MiniJavaParser.State_printContext):
pass
# Exit a parse tree produced by MiniJavaParser#state_print.
def exitState_print(self, ctx:MiniJavaParser.State_printContext):
pass
# Enter a parse tree produced by MiniJavaParser#state_assign.
def enterState_assign(self, ctx:MiniJavaParser.State_assignContext):
pass
# Exit a parse tree produced by MiniJavaParser#state_assign.
def exitState_assign(self, ctx:MiniJavaParser.State_assignContext):
pass
# Enter a parse tree produced by MiniJavaParser#state_array_assign.
def enterState_array_assign(self, ctx:MiniJavaParser.State_array_assignContext):
pass
# Exit a parse tree produced by MiniJavaParser#state_array_assign.
def exitState_array_assign(self, ctx:MiniJavaParser.State_array_assignContext):
pass
# Enter a parse tree produced by MiniJavaParser#err_miss_RHS.
def enterErr_miss_RHS(self, ctx:MiniJavaParser.Err_miss_RHSContext):
pass
# Exit a parse tree produced by MiniJavaParser#err_miss_RHS.
def exitErr_miss_RHS(self, ctx:MiniJavaParser.Err_miss_RHSContext):
pass
# Enter a parse tree produced by MiniJavaParser#err_lparent_closing.
def enterErr_lparent_closing(self, ctx:MiniJavaParser.Err_lparent_closingContext):
pass
# Exit a parse tree produced by MiniJavaParser#err_lparent_closing.
def exitErr_lparent_closing(self, ctx:MiniJavaParser.Err_lparent_closingContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_this.
def enterExpr_this(self, ctx:MiniJavaParser.Expr_thisContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_this.
def exitExpr_this(self, ctx:MiniJavaParser.Expr_thisContext):
pass
# Enter a parse tree produced by MiniJavaParser#err_many_lparents.
def enterErr_many_lparents(self, ctx:MiniJavaParser.Err_many_lparentsContext):
pass
# Exit a parse tree produced by MiniJavaParser#err_many_lparents.
def exitErr_many_lparents(self, ctx:MiniJavaParser.Err_many_lparentsContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_op_multi.
def enterExpr_op_multi(self, ctx:MiniJavaParser.Expr_op_multiContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_op_multi.
def exitExpr_op_multi(self, ctx:MiniJavaParser.Expr_op_multiContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_bool.
def enterExpr_bool(self, ctx:MiniJavaParser.Expr_boolContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_bool.
def exitExpr_bool(self, ctx:MiniJavaParser.Expr_boolContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_length.
def enterExpr_length(self, ctx:MiniJavaParser.Expr_lengthContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_length.
def exitExpr_length(self, ctx:MiniJavaParser.Expr_lengthContext):
pass
# Enter a parse tree produced by MiniJavaParser#err_rparent_closing.
def enterErr_rparent_closing(self, ctx:MiniJavaParser.Err_rparent_closingContext):
pass
# Exit a parse tree produced by MiniJavaParser#err_rparent_closing.
def exitErr_rparent_closing(self, ctx:MiniJavaParser.Err_rparent_closingContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_op_and.
def enterExpr_op_and(self, ctx:MiniJavaParser.Expr_op_andContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_op_and.
def exitExpr_op_and(self, ctx:MiniJavaParser.Expr_op_andContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_lrparents.
def enterExpr_lrparents(self, ctx:MiniJavaParser.Expr_lrparentsContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_lrparents.
def exitExpr_lrparents(self, ctx:MiniJavaParser.Expr_lrparentsContext):
pass
# Enter a parse tree produced by MiniJavaParser#err_many_rparents.
def enterErr_many_rparents(self, ctx:MiniJavaParser.Err_many_rparentsContext):
pass
# Exit a parse tree produced by MiniJavaParser#err_many_rparents.
def exitErr_many_rparents(self, ctx:MiniJavaParser.Err_many_rparentsContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_array.
def enterExpr_array(self, ctx:MiniJavaParser.Expr_arrayContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_array.
def exitExpr_array(self, ctx:MiniJavaParser.Expr_arrayContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_int.
def enterExpr_int(self, ctx:MiniJavaParser.Expr_intContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_int.
def exitExpr_int(self, ctx:MiniJavaParser.Expr_intContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_int_array.
def enterExpr_int_array(self, ctx:MiniJavaParser.Expr_int_arrayContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_int_array.
def exitExpr_int_array(self, ctx:MiniJavaParser.Expr_int_arrayContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_op_minus.
def enterExpr_op_minus(self, ctx:MiniJavaParser.Expr_op_minusContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_op_minus.
def exitExpr_op_minus(self, ctx:MiniJavaParser.Expr_op_minusContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_op_plus.
def enterExpr_op_plus(self, ctx:MiniJavaParser.Expr_op_plusContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_op_plus.
def exitExpr_op_plus(self, ctx:MiniJavaParser.Expr_op_plusContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_new_array.
def enterExpr_new_array(self, ctx:MiniJavaParser.Expr_new_arrayContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_new_array.
def exitExpr_new_array(self, ctx:MiniJavaParser.Expr_new_arrayContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_op_less.
def enterExpr_op_less(self, ctx:MiniJavaParser.Expr_op_lessContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_op_less.
def exitExpr_op_less(self, ctx:MiniJavaParser.Expr_op_lessContext):
pass
# Enter a parse tree produced by MiniJavaParser#err_miss_LHS.
def enterErr_miss_LHS(self, ctx:MiniJavaParser.Err_miss_LHSContext):
pass
# Exit a parse tree produced by MiniJavaParser#err_miss_LHS.
def exitErr_miss_LHS(self, ctx:MiniJavaParser.Err_miss_LHSContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_method_calling.
def enterExpr_method_calling(self, ctx:MiniJavaParser.Expr_method_callingContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_method_calling.
def exitExpr_method_calling(self, ctx:MiniJavaParser.Expr_method_callingContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_not.
def enterExpr_not(self, ctx:MiniJavaParser.Expr_notContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_not.
def exitExpr_not(self, ctx:MiniJavaParser.Expr_notContext):
pass
# Enter a parse tree produced by MiniJavaParser#expr_id.
def enterExpr_id(self, ctx:MiniJavaParser.Expr_idContext):
pass
# Exit a parse tree produced by MiniJavaParser#expr_id.
def exitExpr_id(self, ctx:MiniJavaParser.Expr_idContext):
pass
[file: tests/orbit/models/test_ktrlite.py | repo: pochoi/orbit @ 2d5728ccb032a28e4f8cef3dd40b85d2f0d90e35 | blob bb7389b9a85690df40129293a98c4485cec8a41e | license: Apache-2.0 | 5,940 bytes | Python | 1 star]
import pytest
import numpy as np
import pandas as pd
from orbit.estimators.stan_estimator import StanEstimatorMAP
from orbit.models.ktrlite import BaseKTRLite, KTRLiteMAP
from orbit.diagnostics.metrics import smape
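# Each test below fits a KTRLite MAP model on synthetic daily data and checks
# the shape and columns of the prediction frame, the number of posterior
# parameters, and that SMAPE on the held-out responses stays below 0.5.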
@pytest.mark.parametrize("estimator_type", [StanEstimatorMAP])
def test_ktrlite_single_seas(make_daily_data, estimator_type):
train_df, test_df, coef = make_daily_data
ktrlite = KTRLiteMAP(
response_col='response',
date_col='date',
seasonality=[365.25],
seasonality_fs_order=[5],
estimator_type=estimator_type
)
ktrlite.fit(train_df)
predict_df = ktrlite.predict(test_df)
expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
expected_shape = (364, len(expected_columns))
expected_num_parameters = 6
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(ktrlite._posterior_samples) == expected_num_parameters
assert smape(test_df['response'].values, predict_df['prediction'].values) <= 0.5
@pytest.mark.parametrize("estimator_type", [StanEstimatorMAP])
def test_ktrlite_dual_seas(make_daily_data, estimator_type):
train_df, test_df, coef = make_daily_data
ktrlite = KTRLiteMAP(
response_col='response',
date_col='date',
seasonality=[7, 365.25],
seasonality_fs_order=[2, 5],
estimator_type=estimator_type
)
ktrlite.fit(train_df)
predict_df = ktrlite.predict(test_df)
expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
expected_shape = (364, len(expected_columns))
expected_num_parameters = 6
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(ktrlite._posterior_samples) == expected_num_parameters
assert smape(test_df['response'].values, predict_df['prediction'].values) <= 0.5
@pytest.mark.parametrize("level_knot_dates", [pd.date_range(start='2016-03-01', end='2019-01-01', freq='3M'),
pd.date_range(start='2016-03-01', end='2019-01-01', freq='6M')])
def test_ktrlite_level_knot_dates(make_daily_data, level_knot_dates):
train_df, test_df, coef = make_daily_data
ktrlite = KTRLiteMAP(
response_col='response',
date_col='date',
seasonality=[7, 365.25],
seasonality_fs_order=[2, 5],
level_knot_dates=level_knot_dates,
estimator_type=StanEstimatorMAP
)
ktrlite.fit(train_df)
predict_df = ktrlite.predict(test_df)
expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
expected_shape = (364, len(expected_columns))
expected_num_parameters = 6
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(ktrlite._posterior_samples) == expected_num_parameters
assert smape(test_df['response'].values, predict_df['prediction'].values) <= 0.5
@pytest.mark.parametrize("level_knot_length", [90, 120])
def test_ktrlite_level_knot_distance(make_daily_data, level_knot_length):
train_df, test_df, coef = make_daily_data
ktrlite = KTRLiteMAP(
response_col='response',
date_col='date',
seasonality=[7, 365.25],
seasonality_fs_order=[2, 5],
level_knot_length=level_knot_length,
estimator_type=StanEstimatorMAP
)
ktrlite.fit(train_df)
predict_df = ktrlite.predict(test_df)
expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
expected_shape = (364, len(expected_columns))
expected_num_parameters = 6
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(ktrlite._posterior_samples) == expected_num_parameters
assert smape(test_df['response'].values, predict_df['prediction'].values) <= 0.5
@pytest.mark.parametrize("coefficients_knot_length", [90, 120])
def test_ktrlite_coefficients_knot_distance(make_daily_data, coefficients_knot_length):
train_df, test_df, coef = make_daily_data
ktrlite = KTRLiteMAP(
response_col='response',
date_col='date',
seasonality=[7, 365.25],
seasonality_fs_order=[2, 5],
coefficients_knot_length=coefficients_knot_length,
estimator_type=StanEstimatorMAP
)
ktrlite.fit(train_df)
predict_df = ktrlite.predict(test_df)
expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95']
expected_shape = (364, len(expected_columns))
expected_num_parameters = 6
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(ktrlite._posterior_samples) == expected_num_parameters
assert smape(test_df['response'].values, predict_df['prediction'].values) <= 0.5
def test_ktrlite_predict_decompose(make_daily_data):
train_df, test_df, coef = make_daily_data
ktrlite = KTRLiteMAP(
response_col='response',
date_col='date',
seasonality=[7, 365.25],
seasonality_fs_order=[2, 5],
estimator_type=StanEstimatorMAP
)
ktrlite.fit(train_df)
predict_df = ktrlite.predict(test_df, decompose=True)
expected_columns = ['date', 'prediction_5', 'prediction', 'prediction_95',
'trend_5', 'trend', 'trend_95',
'seasonality_7_5', 'seasonality_7', 'seasonality_7_95',
'seasonality_365.25_5', 'seasonality_365.25', 'seasonality_365.25_95']
expected_shape = (364, len(expected_columns))
expected_num_parameters = 6
assert predict_df.shape == expected_shape
assert predict_df.columns.tolist() == expected_columns
assert len(ktrlite._posterior_samples) == expected_num_parameters
assert smape(test_df['response'].values, predict_df['prediction'].values) <= 0.5
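# Note: every test in this module consumes a `make_daily_data` fixture defined
# outside this file. A minimal sketch of such a fixture, assuming only the
# ('date', 'response') columns and the 364-day test horizon implied by the
# expected shapes above (the real conftest may differ):
#
# @pytest.fixture
# def make_daily_data():
#     dates = pd.date_range(start='2016-01-01', periods=365 * 4, freq='D')
#     rng = np.random.default_rng(42)
#     response = 100 + 0.05 * np.arange(len(dates)) + rng.normal(scale=5.0, size=len(dates))
#     df = pd.DataFrame({'date': dates, 'response': response})
#     train_df, test_df = df.iloc[:-364], df.iloc[-364:]
#     coef = None  # the real fixture also returns fitted coefficients
#     return train_df, test_df, coef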
| 36.219512
| 110
| 0.70404
| 732
| 5,940
| 5.368852
| 0.114754
| 0.054962
| 0.039695
| 0.019847
| 0.882188
| 0.863613
| 0.863613
| 0.863613
| 0.850382
| 0.823155
| 0
| 0.035323
| 0.185017
| 5,940
| 163
| 111
| 36.441718
| 0.776492
| 0
| 0
| 0.746032
| 0
| 0
| 0.112121
| 0.007576
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bb7b4927ceac387e33925c9d21eb61fc8eb64933
| 104
|
py
|
Python
|
plugin/sqlite/__init__.py
|
lisugar/ray_build_tools
|
a304c8fc30ce9f61cbdc566d8dc193945f14883d
|
[
"MIT"
] | null | null | null |
plugin/sqlite/__init__.py
|
lisugar/ray_build_tools
|
a304c8fc30ce9f61cbdc566d8dc193945f14883d
|
[
"MIT"
] | null | null | null |
plugin/sqlite/__init__.py
|
lisugar/ray_build_tools
|
a304c8fc30ce9f61cbdc566d8dc193945f14883d
|
[
"MIT"
] | null | null | null |
from build_tools.plugin.sqlite import executor
def get_plugin_class():
return executor.SqlitePlugin
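# get_plugin_class is a conventional plugin-discovery hook: the host build tool
# imports this package and calls the function to obtain the plugin class. A
# hypothetical loader illustrating the idiom (the loader below is not part of
# this repository):
#
# import importlib
#
# def load_plugin(package_name):
#     module = importlib.import_module(package_name)
#     return module.get_plugin_class()
#
# plugin_cls = load_plugin('build_tools.plugin.sqlite')
# plugin = plugin_cls()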
| 26
| 46
| 0.826923
| 14
| 104
| 5.928571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 104
| 4
| 47
| 26
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
bb8123ad7bdfebe54f3ce6fa20aa97cc971458ad
| 15,977
|
py
|
Python
|
tests/test_api/test_user.py
|
cipherboy/modern-paste
|
ecc4168bda2a9e5981d495f9e0538258d9f727a2
|
[
"MIT"
] | 271
|
2016-02-03T03:09:25.000Z
|
2021-12-12T02:21:03.000Z
|
tests/test_api/test_user.py
|
cipherboy/modern-paste
|
ecc4168bda2a9e5981d495f9e0538258d9f727a2
|
[
"MIT"
] | 65
|
2016-02-03T07:20:16.000Z
|
2019-01-09T00:10:10.000Z
|
tests/test_api/test_user.py
|
cipherboy/modern-paste
|
ecc4168bda2a9e5981d495f9e0538258d9f727a2
|
[
"MIT"
] | 64
|
2016-02-03T17:08:32.000Z
|
2021-05-23T08:48:22.000Z
|
import json
import mock
from sqlalchemy.exc import SQLAlchemyError
import config
import constants.api
import database.user
import util.testing
from uri.user import *
from uri.authentication import *
class TestUser(util.testing.DatabaseTestCase):
def test_create_new_user_invalid_data(self):
resp = self.client.post(
UserCreateURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE, json.loads(resp.data))
def test_create_new_user_invalid_username(self):
util.testing.UserFactory.generate(username='username')
resp = self.client.post(
UserCreateURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual('username_not_available_failure', json.loads(resp.data)['failure'])
def test_create_new_user_invalid_email(self):
resp = self.client.post(
UserCreateURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
'email': 'invalid',
}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual('invalid_email_failure', json.loads(resp.data)['failure'])
def test_create_new_user_disabled(self):
config.ENABLE_USER_REGISTRATION = False
resp = self.client.post(
UserCreateURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.USER_REGISTRATION_DISABLED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.USER_REGISTRATION_DISABLED_FAILURE, json.loads(resp.data))
def test_create_new_user_valid(self):
resp = self.client.post(
UserCreateURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
resp = self.client.post(
UserCreateURI.uri(),
data=json.dumps({
'username': 'other_username',
'password': 'password',
'name': 'name',
'email': 'test@test.com',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
def test_create_new_user_server_error(self):
with mock.patch.object(database.user, 'create_new_user', side_effect=SQLAlchemyError):
resp = self.client.post(
UserCreateURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
def test_update_user_details(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
UserUpdateDetailsURI.uri(),
data=json.dumps({
'name': 'name',
'email': 'email@email.com',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual('name', database.user.get_user_by_id(user.user_id).name)
self.assertEqual('email@email.com', database.user.get_user_by_id(user.user_id).email)
self.assertTrue(database.user.authenticate_user('username', 'password'))
def test_update_user_password(self):
util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
UserUpdateDetailsURI.uri(),
data=json.dumps({
'current_password': 'password',
'new_password': 'new_password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertFalse(database.user.authenticate_user('username', 'password'))
self.assertTrue(database.user.authenticate_user('username', 'new_password'))
def test_update_user_details_invalid_email(self):
util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
UserUpdateDetailsURI.uri(),
data=json.dumps({
'name': 'name',
'email': 'email',
}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual('invalid_email_failure', json.loads(resp.data)['failure'])
def test_update_user_details_wrong_current_password(self):
util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
UserUpdateDetailsURI.uri(),
data=json.dumps({
'current_password': 'invalid',
'new_password': 'new_password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.AUTH_FAILURE_CODE, resp.status_code)
self.assertTrue(database.user.authenticate_user('username', 'password'))
self.assertFalse(database.user.authenticate_user('username', 'invalid'))
def test_remove_user_details(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
UserUpdateDetailsURI.uri(),
data=json.dumps({
'name': None,
'email': None,
'new_password': None,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertIsNone(database.user.get_user_by_id(user.user_id).name)
self.assertIsNone(database.user.get_user_by_id(user.user_id).email)
self.assertTrue(database.user.authenticate_user('username', 'password'))
def test_update_user_details_server_error(self):
with mock.patch.object(database.user, 'update_user_details', side_effect=SQLAlchemyError):
util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
UserUpdateDetailsURI.uri(),
data=json.dumps({
'name': None,
'email': None,
'new_password': None,
}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
def test_deactivate_user_not_logged_in(self):
util.testing.UserFactory.generate()
resp = self.client.post(
UserDeactivateURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.AUTH_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.AUTH_FAILURE, json.loads(resp.data))
def test_deactivate_user_logged_in(self):
user = util.testing.UserFactory.generate(username='username', password='password')
resp = self.client.post(
LoginUserURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
resp = self.client.post(
UserDeactivateURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertFalse(database.user.get_user_by_id(user.user_id).is_active)
def test_deactivate_user_api_key(self):
user = util.testing.UserFactory.generate()
resp = self.client.post(
UserDeactivateURI.uri(),
data=json.dumps({
'api_key': 'invalid',
}),
content_type='application/json',
)
self.assertEqual(constants.api.AUTH_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.AUTH_FAILURE, json.loads(resp.data))
resp = self.client.post(
UserDeactivateURI.uri(),
data=json.dumps({
'api_key': user.api_key,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertFalse(database.user.get_user_by_id(user.user_id).is_active)
def test_deactivate_user_server_error(self):
with mock.patch.object(database.user, 'deactivate_user', side_effect=SQLAlchemyError):
user = util.testing.UserFactory.generate()
resp = self.client.post(
UserDeactivateURI.uri(),
data=json.dumps({
'api_key': user.api_key,
}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
def test_api_key_regenerate(self):
old_api_key = util.testing.UserFactory.generate(username='username', password='password').api_key
self.api_login_user('username', 'password')
resp = self.client.post(
UserAPIKeyRegenerateURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
new_key = json.loads(resp.data)['api_key']
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual(64, len(new_key))
self.assertNotEqual(old_api_key, new_key)
def test_api_key_regenerate_server_error(self):
with mock.patch.object(database.user, 'generate_new_api_key', side_effect=SQLAlchemyError):
util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
UserAPIKeyRegenerateURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
def test_check_username_availability_invalid_data(self):
resp = self.client.post(
CheckUsernameAvailabilityURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE, json.loads(resp.data))
def test_check_username_availability_available(self):
resp = self.client.post(
CheckUsernameAvailabilityURI.uri(),
data=json.dumps({
'username': 'username',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertTrue(json.loads(resp.data)['is_available'])
def test_check_username_availability_unavailable(self):
util.testing.UserFactory.generate(username='username')
resp = self.client.post(
CheckUsernameAvailabilityURI.uri(),
data=json.dumps({
'username': 'username',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertFalse(json.loads(resp.data)['is_available'])
# Case-insensitivity
resp = self.client.post(
CheckUsernameAvailabilityURI.uri(),
data=json.dumps({
'username': 'useRNaME',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertFalse(json.loads(resp.data)['is_available'])
def test_check_username_availability_server_error(self):
with mock.patch.object(database.user, 'is_username_available', side_effect=SQLAlchemyError):
resp = self.client.post(
CheckUsernameAvailabilityURI.uri(),
data=json.dumps({
'username': 'username',
}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
def test_validate_email_address_invalid_data(self):
resp = self.client.post(
ValidateEmailAddressURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE, json.loads(resp.data))
def test_validate_email_address_valid(self):
for email in ['test@test.com', 'test@test.co.uk', 'test.test.test@test.a.b.s']:
resp = self.client.post(
ValidateEmailAddressURI.uri(),
data=json.dumps({
'email': email,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertTrue(json.loads(resp.data)['is_valid'])
def test_validate_email_address_invalid(self):
for email in ['invalid', 'test@', 'test@', '@test.com', 'spaces in@address.com']:
resp = self.client.post(
ValidateEmailAddressURI.uri(),
data=json.dumps({
'email': email,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertFalse(json.loads(resp.data)['is_valid'])
def test_validate_email_address_server_error(self):
with mock.patch.object(database.user, 'is_email_address_valid', side_effect=SQLAlchemyError):
resp = self.client.post(
ValidateEmailAddressURI.uri(),
data=json.dumps({
'email': 'test@test.com',
}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
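# Pattern note: each *_server_error test above patches the relevant database.user
# function with mock.patch.object(..., side_effect=SQLAlchemyError) so the API's
# error path is exercised without a real database failure. A standalone sketch of
# the same idiom with generic names (not from this codebase):
#
# class Service(object):
#     def fetch(self):
#         return 'real result'
#
# with mock.patch.object(Service, 'fetch', side_effect=RuntimeError):
#     try:
#         Service().fetch()
#     except RuntimeError:
#         print('error path exercised')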
| 42.155673
| 105
| 0.612881
| 1,620
| 15,977
| 5.828395
| 0.068519
| 0.076255
| 0.104215
| 0.117242
| 0.910718
| 0.897691
| 0.888795
| 0.862847
| 0.845478
| 0.820695
| 0
| 0.000172
| 0.270702
| 15,977
| 378
| 106
| 42.267196
| 0.810161
| 0.001127
| 0
| 0.702312
| 0
| 0
| 0.114119
| 0.008774
| 0
| 0
| 0
| 0
| 0.184971
| 1
| 0.075145
| false
| 0.106936
| 0.026012
| 0
| 0.104046
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
bbe08c1cf2489a7c1206a95bdf8d590d6c368a31
| 5,675
|
py
|
Python
|
914.x-of-a-kind-in-a-deck-of-cards.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
914.x-of-a-kind-in-a-deck-of-cards.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
914.x-of-a-kind-in-a-deck-of-cards.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
#
# @lc app=leetcode id=914 lang=python
#
# [914] X of a Kind in a Deck of Cards
#
# https://leetcode.com/problems/x-of-a-kind-in-a-deck-of-cards/description/
#
# algorithms
# Easy (33.91%)
# Likes: 256
# Dislikes: 64
# Total Accepted: 21.8K
# Total Submissions: 64.4K
# Testcase Example: '[1,2,3,4,4,3,2,1]'
#
# In a deck of cards, each card has an integer written on it.
#
# Return true if and only if you can choose X >= 2 such that it is possible to
# split the entire deck into 1 or more groups of cards, where:
#
#
# Each group has exactly X cards.
# All the cards in each group have the same integer.
#
#
#
#
# Example 1:
#
#
# Input: [1,2,3,4,4,3,2,1]
# Output: true
# Explanation: Possible partition [1,1],[2,2],[3,3],[4,4]
#
#
#
# Example 2:
#
#
# Input: [1,1,1,2,2,2,3,3]
# Output: false
# Explanation: No possible partition.
#
#
#
# Example 3:
#
#
# Input: [1]
# Output: false
# Explanation: No possible partition.
#
#
#
# Example 4:
#
#
# Input: [1,1]
# Output: true
# Explanation: Possible partition [1,1]
#
#
#
# Example 5:
#
#
# Input: [1,1,2,2,2,2]
# Output: true
# Explanation: Possible partition [1,1],[2,2],[2,2]
#
#
#
#
#
#
#
# Note:
#
#
# 1 <= deck.length <= 10000
# 0 <= deck[i] < 10000
#
#
#
#
#
#
#
#
#
#
#
#
#
class Solution(object):
def _hasGroupsSizeX(self, deck):
"""
:type deck: List[int]
:rtype: bool
"""
# Wrong Answer
# [1,1,2,2,2,2]
        # a count may be a multiple of the smallest factor
data = {}
for d in deck:
data[d] = data.get(d, 0) + 1
return len(set(data.values())) == 1 and data.values()[0] > 1
def __hasGroupsSizeX(self, deck):
"""
:type deck: List[int]
:rtype: bool
"""
# Still Wrong
# [1,1,1,1,2,2,2,2,2,2]
        # the counts may share a smallest common factor
# too complex
data = {}
for d in deck:
data[d] = data.get(d, 0) + 1
min_factor = None
values = data.values()
if not values:
return False
if values:
if min(values) < 2:
return False
for i in range(len(values)-1):
factor = self.gcd(values[i], values[i+1])
if factor < 2:
return False
if not min_factor:
min_factor = factor
else:
if min_factor < factor:
if factor % min_factor:
return False
elif min_factor > factor:
if min_factor % factor:
return False
else:
min_factor = factor
return True
def hasGroupsSizeX(self, deck):
"""
:type deck: List[int]
:rtype: bool
"""
data = {}
for d in deck:
data[d] = data.get(d, 0) + 1
min_value = min(data.values())
for value in data.values():
if self.gcd(value, min_value) < 2:
return False
return True
def gcd(self, a, b):
while b:
a, b = b, a % b
return a
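# A compact Python 3 restatement of the accepted idea: the deck can be split
# iff the gcd of all card counts is at least 2. (The imports below are
# assumptions for this sketch and are not part of this Python 2 file.)
#
# from collections import Counter
# from functools import reduce
# from math import gcd
#
# def has_groups_size_x(deck):
#     return reduce(gcd, Counter(deck).values()) >= 2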
# if __name__ == '__main__':
# s = Solution()
# print s.hasGroupsSizeX([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19])
# print s.hasGroupsSizeX([1,2,3,4,4,3,2,1])
# print s.hasGroupsSizeX([1,1,2,2,2,2])
# print s.hasGroupsSizeX([2,2])
# print s.hasGroupsSizeX([1])
# print s.hasGroupsSizeX([1,1,1,1,2,2,2,2,2,2])
# print s.gcd(1, 3)
# print s.gcd(2, 3)
# print s.gcd(3, 6)
# print s.gcd(6, 8)
# print s.gcd(6, 6)
| 30.842391
| 2,123
| 0.506256
| 1,497
| 5,675
| 1.905144
| 0.09352
| 0.329593
| 0.482819
| 0.638149
| 0.605891
| 0.587658
| 0.559958
| 0.512272
| 0.489481
| 0.4446
| 0
| 0.283799
| 0.224493
| 5,675
| 183
| 2,124
| 31.010929
| 0.364235
| 0.662203
| 0
| 0.456522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0
| 0
| 0.326087
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a5727c78c1aef8d3315ce7b20e7a57a2fa112b3e
| 183
|
py
|
Python
|
tests/sat/Models/s3-10.UNSAT.dimacs.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/sat/Models/s3-10.UNSAT.dimacs.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/sat/Models/s3-10.UNSAT.dimacs.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
p cnf 4 14
-4 2 1 0
-2 -3 4 0
1 -2 -4 0
-2 3 -4 0
1 2 3 0
1 2 -3 0
1 -2 3 0
1 -2 -3 0
-1 2 3 0
-1 2 -3 0
-1 -2 3 0
-1 -2 -3 0
1 2 4 0
-2 3 4 0
"""
output = """
unsat
"""
| 8.318182
| 12
| 0.431694
| 63
| 183
| 1.253968
| 0.174603
| 0.278481
| 0.379747
| 0.405063
| 0.670886
| 0.670886
| 0.670886
| 0.620253
| 0.620253
| 0.443038
| 0
| 0.522124
| 0.382514
| 183
| 21
| 13
| 8.714286
| 0.176991
| 0
| 0
| 0.1
| 0
| 0
| 0.825137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a57c6ff9520a6d273a8f6334abeee2710d672394
| 136
|
py
|
Python
|
src/main/resources/docs/tests/E1304.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/E1304.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/E1304.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
##Patterns: E1304
##Err: E1304
print "%(arg1)s %(arg2)s" % {"arg1":"wrong"}
print "%(arg1)s %(arg2)s" % {"arg1":"this is", "arg2":"ok"}
| 27.2
| 59
| 0.558824
| 21
| 136
| 3.619048
| 0.52381
| 0.236842
| 0.263158
| 0.368421
| 0.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.117647
| 136
| 5
| 59
| 27.2
| 0.508333
| 0.183824
| 0
| 0
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
3c5710a59ac5f2263e31311a973afded6acdaaa6
| 86
|
py
|
Python
|
create_user.py
|
abdelinho24/flask-shop
|
ef030e6f2f015c74aa0bb00eccd6c88787606290
|
[
"BSD-2-Clause"
] | 1
|
2015-10-10T01:21:58.000Z
|
2015-10-10T01:21:58.000Z
|
create_user.py
|
abdelinho24/flask-shop
|
ef030e6f2f015c74aa0bb00eccd6c88787606290
|
[
"BSD-2-Clause"
] | null | null | null |
create_user.py
|
abdelinho24/flask-shop
|
ef030e6f2f015c74aa0bb00eccd6c88787606290
|
[
"BSD-2-Clause"
] | 1
|
2019-02-22T18:31:45.000Z
|
2019-02-22T18:31:45.000Z
|
from flask_shop.flask_shop import app, bcrypt
from flask_shop.models import User, db
| 21.5
| 45
| 0.825581
| 15
| 86
| 4.533333
| 0.6
| 0.397059
| 0.382353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 3
| 46
| 28.666667
| 0.906667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3c6570bbc51fa5338331adda338511884d4c7d8e
| 7,149
|
py
|
Python
|
chaosLib/litmus/pod_delete/lib/pod_delete.py
|
prateekdegaons1991/experiment-loadtest
|
b53c70fac5b2f7d37df77844b26f79741c74c1b6
|
[
"Apache-2.0"
] | 8
|
2020-04-17T06:34:30.000Z
|
2021-12-18T10:54:50.000Z
|
chaosLib/litmus/pod_delete/lib/pod_delete.py
|
oumkale/test-python
|
1f3d3e42ffbe1bf5ed9df8a0c6038e50129b2c4d
|
[
"Apache-2.0"
] | 15
|
2020-04-18T06:01:53.000Z
|
2022-02-15T08:56:25.000Z
|
chaosLib/litmus/pod_delete/lib/pod_delete.py
|
oumkale/test-python
|
1f3d3e42ffbe1bf5ed9df8a0c6038e50129b2c4d
|
[
"Apache-2.0"
] | 12
|
2020-04-17T05:14:27.000Z
|
2022-03-29T19:24:20.000Z
|
import pkg.types.types as types
import pkg.events.events as events
import logging
import pkg.utils.common.common as common
import pkg.utils.common.pods as pods
from datetime import datetime
import pkg.status.application as status
import pkg.maths.maths as maths
# PreparePodDelete contains the preparation steps before chaos injection
def PreparePodDelete(experimentsDetails, resultDetails, eventsDetails, chaosDetails, clients):
#Waiting for the ramp time before chaos injection
    if experimentsDetails.RampTime != 0:
logging.info("[Ramp]: Waiting for the %s ramp time before injecting chaos",experimentsDetails.RampTime)
common.WaitForDuration(experimentsDetails.RampTime)
# mode for chaos injection
if experimentsDetails.Sequence.lower() == "serial":
err = injectChaosInSerialMode(experimentsDetails, chaosDetails, eventsDetails, resultDetails, clients)
if err != None:
return err
elif experimentsDetails.Sequence.lower() == "parallel":
err = injectChaosInParallelMode(experimentsDetails, chaosDetails, eventsDetails, resultDetails, clients)
if err != None:
return err
else:
return ValueError("{} sequence is not supported".format(experimentsDetails.Sequence))
#Waiting for the ramp time after chaos injection
    if experimentsDetails.RampTime != 0:
logging.info("[Ramp]: Waiting for the %s ramp time after injecting chaos",experimentsDetails.RampTime)
common.WaitForDuration(experimentsDetails.RampTime)
return None
# injectChaosInSerialMode delete the target application pods serial mode(one by one)
def injectChaosInSerialMode(experimentsDetails, chaosDetails, eventsDetails, resultDetails, clients):
#Initialising GracePeriod
GracePeriod = 0
#ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
ChaosStartTimeStamp = datetime.now()
duration = (datetime.now() - ChaosStartTimeStamp).seconds
while duration < experimentsDetails.ChaosDuration:
# Get the target pod details for the chaos execution
# if the target pod is not defined it will derive the random target pod list using pod affected percentage
        if experimentsDetails.TargetPods == "" and chaosDetails.AppDetail.Label == "":
return ValueError("Please provide one of the appLabel or TARGET_PODS")
targetPodList, err = pods.Pods().GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, chaosDetails, clients)
if err != None:
return err
podNames = []
for pod in targetPodList.items:
podNames.append(pod.metadata.name)
logging.info("[Info]: Target pods list, %s", podNames)
        if experimentsDetails.EngineName != "":
msg = "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
events.GenerateEvents(eventsDetails, chaosDetails, "ChaosEngine", clients)
#Deleting the application pod
        for pod in targetPodList.items:
logging.info("[Info]: Killing the following pods, PodName : %s", pod.metadata.name)
try:
if experimentsDetails.Force:
clients.clientCoreV1.delete_namespaced_pod(pod.metadata.name, experimentsDetails.AppNS, grace_period_seconds=GracePeriod)
else:
clients.clientCoreV1.delete_namespaced_pod(pod.metadata.name, experimentsDetails.AppNS)
except Exception as exp:
return exp
if chaosDetails.Randomness:
err = common.RandomInterval(experimentsDetails.ChaosInterval)
if err != None:
return err
else:
#Waiting for the chaos interval after chaos injection
if experimentsDetails.ChaosInterval != "":
logging.info("[Wait]: Wait for the chaos interval %s",(experimentsDetails.ChaosInterval))
waitTime = maths.atoi(experimentsDetails.ChaosInterval)
common.WaitForDuration(waitTime)
#Verify the status of pod after the chaos injection
logging.info("[Status]: Verification for the recreation of application pod")
        err = status.Application().CheckApplicationStatus(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients)
if err != None:
return err
duration = (datetime.now() - ChaosStartTimeStamp).seconds
logging.info("[Completion]: %s chaos is done",(experimentsDetails.ExperimentName))
return None
# injectChaosInParallelMode delete the target application pods in parallel mode (all at once)
def injectChaosInParallelMode(experimentsDetails, chaosDetails, eventsDetails, resultDetails, clients):
#Initialising GracePeriod
GracePeriod = 0
#ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
ChaosStartTimeStamp = datetime.now()
duration = (datetime.now() - ChaosStartTimeStamp).seconds
while duration < experimentsDetails.ChaosDuration:
# Get the target pod details for the chaos execution
# if the target pod is not defined it will derive the random target pod list using pod affected percentage
        if experimentsDetails.TargetPods == "" and chaosDetails.AppDetail.Label == "":
return ValueError("Please provide one of the appLabel or TARGET_PODS")
targetPodList, err = pods.Pods().GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, chaosDetails, clients)
if err != None:
return err
podNames = []
for pod in targetPodList.items:
podNames.append(str(pod.metadata.name))
logging.info("[Info]: Target pods list for chaos, %s",(podNames))
        if experimentsDetails.EngineName != "":
msg = "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
            events.GenerateEvents(eventsDetails, chaosDetails, "ChaosEngine", clients)
#Deleting the application pod
for pod in targetPodList.items:
logging.info("[Info]: Killing the following pods, PodName : %s", pod.metadata.name)
try:
if experimentsDetails.Force:
clients.clientCoreV1.delete_namespaced_pod(pod.metadata.name, experimentsDetails.AppNS, grace_period_seconds=GracePeriod)
else:
clients.clientCoreV1.delete_namespaced_pod(pod.metadata.name, experimentsDetails.AppNS)
except Exception as err:
return err
if chaosDetails.Randomness:
err = common.RandomInterval(experimentsDetails.ChaosInterval)
if err != None:
return err
else:
#Waiting for the chaos interval after chaos injection
            if experimentsDetails.ChaosInterval != "":
logging.info("[Wait]: Wait for the chaos interval %s", experimentsDetails.ChaosInterval)
waitTime = maths.atoi(experimentsDetails.ChaosInterval)
common.WaitForDuration(waitTime)
#Verify the status of pod after the chaos injection
logging.info("[Status]: Verification for the recreation of application pod")
err = status.Application().CheckApplicationStatus(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients)
if err != None:
return err
duration = (datetime.now() - ChaosStartTimeStamp).seconds
logging.info("[Completion]: %s chaos is done",(experimentsDetails.ExperimentName))
return None
| 43.066265
| 169
| 0.773115
| 779
| 7,149
| 7.077022
| 0.18742
| 0.01306
| 0.01306
| 0.021767
| 0.87339
| 0.853982
| 0.835843
| 0.835843
| 0.804281
| 0.788319
| 0
| 0.001306
| 0.142957
| 7,149
| 165
| 170
| 43.327273
| 0.898482
| 0.160022
| 0
| 0.758929
| 0
| 0
| 0.13018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.071429
| 0
| 0.241071
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5902687127ce3500229b5afdc67749d445fae75d
| 8,799
|
py
|
Python
|
funcChan.py
|
Lyle-zhang/kinetic_schemes
|
dc572bd1eedfddb871767573724cadddc57db76d
|
[
"MIT"
] | 1
|
2021-12-27T11:14:58.000Z
|
2021-12-27T11:14:58.000Z
|
funcChan.py
|
Lyle-zhang/kinetic_schemes
|
dc572bd1eedfddb871767573724cadddc57db76d
|
[
"MIT"
] | null | null | null |
funcChan.py
|
Lyle-zhang/kinetic_schemes
|
dc572bd1eedfddb871767573724cadddc57db76d
|
[
"MIT"
] | 1
|
2021-08-14T13:40:24.000Z
|
2021-08-14T13:40:24.000Z
|
"""
Functions based on Chan 1985 kinetic reaction scheme for biomass pyrolysis.
Reactions evaluated at some temperature.
Functions:
chan1 - primary reactions only
chan2 - primary reactions without moisture
chan3 - primary and secondary reactions
chan4 - primary and secondary reactions without moisture
Reference:
Chan, Kelbon, Krieger, 1985. Fuel, 64(11), pp.1505–1513.
"""
# Modules
# -----------------------------------------------------------------------------
import numpy as np
# Function - primary kinetic reactions from Table 2
# -----------------------------------------------------------------------------
def chan1(rhow, mc, T, dt, nt):
"""
rhow = wood density, kg/m^3
mc = moisture content, %
T = temperature, K
dt = time step, s
nt = total number of time steps
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*rhow
# vector for initial moisture content concentration, kg/m^3
pm = pw*(mc/100)
# vectors to store product concentrations, kg/m^3
pg = np.zeros(nt) # gas
pt = np.zeros(nt) # tar
pc = np.zeros(nt) # char
pv = np.zeros(nt) # water vapor
R = 0.008314 # universal gas constant, kJ/mol*K
# A = pre-factor (1/s) and E = activation energy (kJ/mol)
A1 = 1.3e8; E1 = 140 # wood -> gas
A2 = 2e8; E2 = 133 # wood -> tar
A3 = 1.08e7; E3 = 121 # wood -> char
A4 = 5.13e6; E4 = 87.9 # moisture -> water vapor
# reaction rate constant for each reaction, 1/s
K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas
K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar
K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char
K4 = A4 * np.exp(-E4 / (R * T)) # moisture -> water vapor
# concentrations at each time step for each product, kg/m^3
# reaction rate as r, rho/s
# concentration as density p, kg/m^3
for i in range(1, nt):
rww = -(K1+K2+K3) * pw[i-1] # wood rate
rwg = K1 * pw[i-1] # wood -> gas rate
rwt = K2 * pw[i-1] # wood -> tar rate
rwc = K3 * pw[i-1] # wood -> char rate
rmw = K4 * pm[i-1] # moisture -> water vapor rate
pw[i] = pw[i-1] + rww*dt # wood
pg[i] = pg[i-1] + rwg*dt # gas
pt[i] = pt[i-1] + rwt*dt # tar
pc[i] = pc[i-1] + rwc*dt # char
pm[i] = pm[i-1] - rmw*dt # moisture
pv[i] = pv[i-1] + rmw*dt # water vapor
# return the wood, gas, tar, char, moisture, water vapor concentrations
# as a density, kg/m^3
return pw, pg, pt, pc, pm, pv
# Function - primary kinetic reactions w/o moisture from Table 2
# -----------------------------------------------------------------------------
def chan2(rhow, T, dt, nt):
"""
rhow = wood density, kg/m^3
T = temperature, K
dt = time step, s
nt = total number of time steps
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*rhow
# vectors to store product concentrations, kg/m^3
pg = np.zeros(nt) # gas
pt = np.zeros(nt) # tar
pc = np.zeros(nt) # char
R = 0.008314 # universal gas constant, kJ/mol*K
# A = pre-factor (1/s) and E = activation energy (kJ/mol)
A1 = 1.3e8; E1 = 140 # wood -> gas
A2 = 2e8; E2 = 133 # wood -> tar
A3 = 1.08e7; E3 = 121 # wood -> char
# reaction rate constant for each reaction, 1/s
K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas
K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar
K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char
# concentrations at each time step for each product, kg/m^3
# reaction rate as r, rho/s
# concentration as density p, kg/m^3
for i in range(1, nt):
rww = -(K1+K2+K3) * pw[i-1] # wood rate
rwg = K1 * pw[i-1] # wood -> gas rate
rwt = K2 * pw[i-1] # wood -> tar rate
rwc = K3 * pw[i-1] # wood -> char rate
pw[i] = pw[i-1] + rww*dt # wood
pg[i] = pg[i-1] + rwg*dt # gas
pt[i] = pt[i-1] + rwt*dt # tar
pc[i] = pc[i-1] + rwc*dt # char
# return the wood, gas, tar, char, moisture, water vapor concentrations
# as a density, kg/m^3
return pw, pg, pt, pc
# Function - primary and secondary reactions from Table 2
# -----------------------------------------------------------------------------
def chan3(rhow, mc, T, dt, nt):
"""
rhow = wood density, kg/m^3
mc = moisture content, %
T = temperature, K
dt = time step, s
nt = total number of time steps
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*rhow
# vector for initial moisture content concentration, kg/m^3
pm = pw*(mc/100)
# vectors to store product concentrations, kg/m^3
pg = np.zeros(nt) # gas
pt = np.zeros(nt) # tar
pc = np.zeros(nt) # char
pv = np.zeros(nt) # water vapor
R = 0.008314 # universal gas constant, kJ/mol*K
# A = pre-factor (1/s) and E = activation energy (kJ/mol)
A1 = 1.3e8; E1 = 140 # wood -> gas1
A2 = 2e8; E2 = 133 # wood -> tar1
A3 = 1.08e7; E3 = 121 # wood -> char
A4 = 5.13e6; E4 = 87.9 # moisture -> water vapor
A5 = 1.48e6; E5 = 144 # tar -> gas2 + tar2
# reaction rate constant for each reaction, 1/s
K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas1
K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar1
K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char
K4 = A4 * np.exp(-E4 / (R * T)) # moisture -> water vapor
K5 = A5 * np.exp(-E5 / (R * T)) # tar -> gas2 + tar2
# concentrations at each time step for each product, kg/m^3
# reaction rate as r, rho/s
# concentration as density p, kg/m^3
for i in range(1, nt):
rww = -(K1+K2+K3) * pw[i-1] # wood rate
rwg = K1 * pw[i-1] # wood -> gas rate
rwt = K2 * pw[i-1] # wood -> tar rate
rwc = K3 * pw[i-1] # wood -> char rate
rmw = K4 * pm[i-1] # moisture -> water vapor rate
rtt = K5 * pt[i-1] # tar -> gas2 + tar2 rate
pw[i] = pw[i-1] + rww*dt # wood
pg[i] = pg[i-1] + (rwg + 0.9*rtt)*dt # gas
pt[i] = pt[i-1] + (rwt + 0.1*rtt)*dt # tar
pc[i] = pc[i-1] + rwc*dt # char
pm[i] = pm[i-1] - rmw*dt # moisture
pv[i] = pv[i-1] + rmw*dt # water vapor
# return the wood, gas, tar, char, moisture, water vapor concentrations
# as a density, kg/m^3
return pw, pg, pt, pc, pm, pv
# Function - primary and secondary reactions w/o moisture from Table 2
# -----------------------------------------------------------------------------
def chan4(rhow, T, dt, nt):
"""
rhow = wood density, kg/m^3
T = temperature, K
dt = time step, s
nt = total number of time steps
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*rhow
# vectors to store product concentrations, kg/m^3
pg = np.zeros(nt) # gas
pt = np.zeros(nt) # tar
pc = np.zeros(nt) # char
R = 0.008314 # universal gas constant, kJ/mol*K
# A = pre-factor (1/s) and E = activation energy (kJ/mol)
A1 = 1.3e8; E1 = 140 # wood -> gas1
A2 = 2e8; E2 = 133 # wood -> tar1
A3 = 1.08e7; E3 = 121 # wood -> char
A5 = 1.48e6; E5 = 144 # tar -> gas2 + tar2
# reaction rate constant for each reaction, 1/s
K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas1
K2 = A2 * np.exp(-E2 / (R * T)) # wood -> tar1
K3 = A3 * np.exp(-E3 / (R * T)) # wood -> char
K5 = A5 * np.exp(-E5 / (R * T)) # tar -> gas2 + tar2
# concentrations at each time step for each product, kg/m^3
# reaction rate as r, rho/s
# concentration as density p, kg/m^3
for i in range(1, nt):
rww = -(K1+K2+K3) * pw[i-1] # wood rate
rwg = K1 * pw[i-1] # wood -> gas rate
rwt = K2 * pw[i-1] # wood -> tar rate
rwc = K3 * pw[i-1] # wood -> char rate
rtt = K5 * pt[i-1] # tar -> gas2 + tar2 rate
pw[i] = pw[i-1] + rww*dt # wood
pg[i] = pg[i-1] + (rwg + 0.9*rtt)*dt # gas
pt[i] = pt[i-1] + (rwt + 0.1*rtt)*dt # tar
pc[i] = pc[i-1] + rwc*dt # char
# return the wood, gas, tar, char, moisture, water vapor concentrations
# as a density, kg/m^3
return pw, pg, pt, pc
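# Minimal usage sketch (the parameter values below are illustrative assumptions,
# not taken from the reference): run the primary-reactions-only scheme at 773 K.
if __name__ == '__main__':
    dt = 0.01                # time step, s
    tmax = 25.0              # total simulation time, s
    nt = int(tmax / dt) + 1  # number of time steps
    pw, pg, pt, pc, pm, pv = chan1(rhow=700, mc=10, T=773, dt=dt, nt=nt)
    print('final char fraction: {:.3f}'.format(pc[-1] / 700))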
| 36.510373
| 79
| 0.483009
| 1,305
| 8,799
| 3.257471
| 0.114943
| 0.018819
| 0.024465
| 0.030111
| 0.912962
| 0.893437
| 0.893437
| 0.893437
| 0.878382
| 0.878382
| 0
| 0.065562
| 0.341289
| 8,799
| 241
| 80
| 36.510373
| 0.667702
| 0.494147
| 0
| 0.954128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036697
| false
| 0
| 0.009174
| 0
| 0.082569
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
59162871e6b1f9a1db64babef53c52a27c8e2a78
| 44
|
py
|
Python
|
vis/__init__.py
|
joeaortiz/gbp
|
5670a498950bfa948da502b2381899ab46f61021
|
[
"MIT"
] | 50
|
2020-03-10T08:49:45.000Z
|
2022-03-24T01:50:24.000Z
|
vis/__init__.py
|
joeaortiz/gbp
|
5670a498950bfa948da502b2381899ab46f61021
|
[
"MIT"
] | 1
|
2022-03-21T02:36:36.000Z
|
2022-03-21T03:03:38.000Z
|
vis/__init__.py
|
joeaortiz/gbp
|
5670a498950bfa948da502b2381899ab46f61021
|
[
"MIT"
] | 11
|
2020-04-24T16:29:48.000Z
|
2022-03-09T07:39:30.000Z
|
from . import vis_scene
from . import ba_vis
| 22
| 23
| 0.795455
| 8
| 44
| 4.125
| 0.625
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 2
| 24
| 22
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
591f1f51426c00f086ae91a427aa67db3d54457d
| 22,381
|
py
|
Python
|
sdk/python/pulumi_alicloud/bastionhost/host_account.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/bastionhost/host_account.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/bastionhost/host_account.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HostAccountArgs', 'HostAccount']
@pulumi.input_type
class HostAccountArgs:
def __init__(__self__, *,
host_account_name: pulumi.Input[str],
host_id: pulumi.Input[str],
instance_id: pulumi.Input[str],
protocol_name: pulumi.Input[str],
pass_phrase: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a HostAccount resource.
:param pulumi.Input[str] host_account_name: The name of the host account. The name can be up to 128 characters in length.
:param pulumi.Input[str] host_id: The ID of the host for which you want to create an account.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to create an account for the host.
:param pulumi.Input[str] protocol_name: The protocol used by the host account. Valid values: SSH,RDP
:param pulumi.Input[str] pass_phrase: The passphrase of the private key for the host account. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
:param pulumi.Input[str] password: The password of the host account.
:param pulumi.Input[str] private_key: The private key of the host account. The value is a Base64-encoded string. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`
"""
pulumi.set(__self__, "host_account_name", host_account_name)
pulumi.set(__self__, "host_id", host_id)
pulumi.set(__self__, "instance_id", instance_id)
pulumi.set(__self__, "protocol_name", protocol_name)
if pass_phrase is not None:
pulumi.set(__self__, "pass_phrase", pass_phrase)
if password is not None:
pulumi.set(__self__, "password", password)
if private_key is not None:
pulumi.set(__self__, "private_key", private_key)
@property
@pulumi.getter(name="hostAccountName")
def host_account_name(self) -> pulumi.Input[str]:
"""
The name of the host account. The name can be up to 128 characters in length.
"""
return pulumi.get(self, "host_account_name")
@host_account_name.setter
def host_account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "host_account_name", value)
@property
@pulumi.getter(name="hostId")
def host_id(self) -> pulumi.Input[str]:
"""
The ID of the host for which you want to create an account.
"""
return pulumi.get(self, "host_id")
@host_id.setter
def host_id(self, value: pulumi.Input[str]):
pulumi.set(self, "host_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
"""
The ID of the Bastionhost instance where you want to create an account for the host.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="protocolName")
def protocol_name(self) -> pulumi.Input[str]:
"""
The protocol used by the host account. Valid values: SSH,RDP
"""
return pulumi.get(self, "protocol_name")
@protocol_name.setter
def protocol_name(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol_name", value)
@property
@pulumi.getter(name="passPhrase")
def pass_phrase(self) -> Optional[pulumi.Input[str]]:
"""
The passphrase of the private key for the host account. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
"""
return pulumi.get(self, "pass_phrase")
@pass_phrase.setter
def pass_phrase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pass_phrase", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password of the host account.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> Optional[pulumi.Input[str]]:
"""
The private key of the host account. The value is a Base64-encoded string. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`
"""
return pulumi.get(self, "private_key")
@private_key.setter
def private_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_key", value)
@pulumi.input_type
class _HostAccountState:
def __init__(__self__, *,
host_account_id: Optional[pulumi.Input[str]] = None,
host_account_name: Optional[pulumi.Input[str]] = None,
host_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
pass_phrase: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
protocol_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering HostAccount resources.
:param pulumi.Input[str] host_account_id: Hosting account ID.
:param pulumi.Input[str] host_account_name: The name of the host account. The name can be up to 128 characters in length.
:param pulumi.Input[str] host_id: The ID of the host for which you want to create an account.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to create an account for the host.
:param pulumi.Input[str] pass_phrase: The passphrase of the private key for the host account. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
:param pulumi.Input[str] password: The password of the host account.
:param pulumi.Input[str] private_key: The private key of the host account. The value is a Base64-encoded string. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`
:param pulumi.Input[str] protocol_name: The protocol used by the host account. Valid values: SSH,RDP
"""
if host_account_id is not None:
pulumi.set(__self__, "host_account_id", host_account_id)
if host_account_name is not None:
pulumi.set(__self__, "host_account_name", host_account_name)
if host_id is not None:
pulumi.set(__self__, "host_id", host_id)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if pass_phrase is not None:
pulumi.set(__self__, "pass_phrase", pass_phrase)
if password is not None:
pulumi.set(__self__, "password", password)
if private_key is not None:
pulumi.set(__self__, "private_key", private_key)
if protocol_name is not None:
pulumi.set(__self__, "protocol_name", protocol_name)
@property
@pulumi.getter(name="hostAccountId")
def host_account_id(self) -> Optional[pulumi.Input[str]]:
"""
Hosting account ID.
"""
return pulumi.get(self, "host_account_id")
@host_account_id.setter
def host_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_account_id", value)
@property
@pulumi.getter(name="hostAccountName")
def host_account_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the host account. The name can be up to 128 characters in length.
"""
return pulumi.get(self, "host_account_name")
@host_account_name.setter
def host_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_account_name", value)
@property
@pulumi.getter(name="hostId")
def host_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the host for which you want to create an account.
"""
return pulumi.get(self, "host_id")
@host_id.setter
def host_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Bastionhost instance where you want to create an account for the host.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="passPhrase")
def pass_phrase(self) -> Optional[pulumi.Input[str]]:
"""
The passphrase of the private key for the host account. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
"""
return pulumi.get(self, "pass_phrase")
@pass_phrase.setter
def pass_phrase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pass_phrase", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password of the host account.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> Optional[pulumi.Input[str]]:
"""
The private key of the host account. The value is a Base64-encoded string. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`
"""
return pulumi.get(self, "private_key")
@private_key.setter
def private_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_key", value)
@property
@pulumi.getter(name="protocolName")
def protocol_name(self) -> Optional[pulumi.Input[str]]:
"""
The protocol used by the host account. Valid values: SSH,RDP
"""
return pulumi.get(self, "protocol_name")
@protocol_name.setter
def protocol_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol_name", value)
class HostAccount(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_name: Optional[pulumi.Input[str]] = None,
host_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
pass_phrase: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
protocol_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Bastion Host Host Account resource.
For information about Bastion Host Host Account and how to use it, see [What is Host Account](https://www.alibabacloud.com/help/en/doc-detail/204377.htm).
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.bastionhost.HostAccount("example",
host_account_name="example_value",
host_id="15",
instance_id="bastionhost-cn-tl32bh0no30",
password="YourPassword12345",
protocol_name="SSH")
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostAccount:HostAccount example <instance_id>:<host_account_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] host_account_name: The name of the host account. The name can be up to 128 characters in length.
:param pulumi.Input[str] host_id: The ID of the host for which you want to create an account.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to create an account for the host.
:param pulumi.Input[str] pass_phrase: The passphrase of the private key for the host account. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
:param pulumi.Input[str] password: The password of the host account.
:param pulumi.Input[str] private_key: The private key of the host account. The value is a Base64-encoded string. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
:param pulumi.Input[str] protocol_name: The protocol used by the host account. Valid values: `SSH`, `RDP`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HostAccountArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Bastion Host Host Account resource.
For information about Bastion Host Host Account and how to use it, see [What is Host Account](https://www.alibabacloud.com/help/en/doc-detail/204377.htm).
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.bastionhost.HostAccount("example",
host_account_name="example_value",
host_id="15",
instance_id="bastionhost-cn-tl32bh0no30",
password="YourPassword12345",
protocol_name="SSH")
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostAccount:HostAccount example <instance_id>:<host_account_id>
```
:param str resource_name: The name of the resource.
:param HostAccountArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HostAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host_account_name: Optional[pulumi.Input[str]] = None,
host_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
pass_phrase: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
protocol_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HostAccountArgs.__new__(HostAccountArgs)
if host_account_name is None and not opts.urn:
raise TypeError("Missing required property 'host_account_name'")
__props__.__dict__["host_account_name"] = host_account_name
if host_id is None and not opts.urn:
raise TypeError("Missing required property 'host_id'")
__props__.__dict__["host_id"] = host_id
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["pass_phrase"] = pass_phrase
__props__.__dict__["password"] = password
__props__.__dict__["private_key"] = private_key
if protocol_name is None and not opts.urn:
raise TypeError("Missing required property 'protocol_name'")
__props__.__dict__["protocol_name"] = protocol_name
__props__.__dict__["host_account_id"] = None
super(HostAccount, __self__).__init__(
'alicloud:bastionhost/hostAccount:HostAccount',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
host_account_id: Optional[pulumi.Input[str]] = None,
host_account_name: Optional[pulumi.Input[str]] = None,
host_id: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
pass_phrase: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
protocol_name: Optional[pulumi.Input[str]] = None) -> 'HostAccount':
"""
Get an existing HostAccount resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] host_account_id: Hosting account ID.
:param pulumi.Input[str] host_account_name: The name of the host account. The name can be up to 128 characters in length.
:param pulumi.Input[str] host_id: The ID of the host for which you want to create an account.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to create an account for the host.
:param pulumi.Input[str] pass_phrase: The passphrase of the private key for the host account. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
:param pulumi.Input[str] password: The password of the host account.
:param pulumi.Input[str] private_key: The private key of the host account. The value is a Base64-encoded string. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
:param pulumi.Input[str] protocol_name: The protocol used by the host account. Valid values: `SSH`, `RDP`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HostAccountState.__new__(_HostAccountState)
__props__.__dict__["host_account_id"] = host_account_id
__props__.__dict__["host_account_name"] = host_account_name
__props__.__dict__["host_id"] = host_id
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["pass_phrase"] = pass_phrase
__props__.__dict__["password"] = password
__props__.__dict__["private_key"] = private_key
__props__.__dict__["protocol_name"] = protocol_name
return HostAccount(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="hostAccountId")
def host_account_id(self) -> pulumi.Output[str]:
"""
Hosting account ID.
"""
return pulumi.get(self, "host_account_id")
@property
@pulumi.getter(name="hostAccountName")
def host_account_name(self) -> pulumi.Output[str]:
"""
The name of the host account. The name can be up to 128 characters in length.
"""
return pulumi.get(self, "host_account_name")
@property
@pulumi.getter(name="hostId")
def host_id(self) -> pulumi.Output[str]:
"""
The ID of the host for which you want to create an account.
"""
return pulumi.get(self, "host_id")
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Output[str]:
"""
The ID of the Bastionhost instance where you want to create an account for the host.
"""
return pulumi.get(self, "instance_id")
@property
@pulumi.getter(name="passPhrase")
def pass_phrase(self) -> pulumi.Output[Optional[str]]:
"""
The passphrase of the private key for the host account. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
"""
return pulumi.get(self, "pass_phrase")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
The password of the host account.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> pulumi.Output[Optional[str]]:
"""
The private key of the host account. The value is a Base64-encoded string. **NOTE:** It is valid when the attribute `protocol_name` is `SSH`.
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter(name="protocolName")
def protocol_name(self) -> pulumi.Output[str]:
"""
The protocol used by the host account. Valid values: `SSH`, `RDP`.
"""
return pulumi.get(self, "protocol_name")
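The generated docstrings above only demonstrate password-based authentication. As a complementary sketch (editorial, not taken from the provider docs), the same resource can be configured for key-based SSH authentication through the documented `private_key` and `pass_phrase` inputs; the config/secret names below are hypothetical:

```python
import pulumi
import pulumi_alicloud as alicloud

cfg = pulumi.Config()

# Key-based variant of the docstring example; "hostPrivateKey" and
# "hostKeyPassphrase" are hypothetical config/secret names.
key_example = alicloud.bastionhost.HostAccount("key-example",
    host_account_name="example_key_value",
    host_id="15",
    instance_id="bastionhost-cn-tl32bh0no30",
    private_key=cfg.require_secret("hostPrivateKey"),  # Base64-encoded string
    pass_phrase=cfg.get_secret("hostKeyPassphrase"),   # only valid when protocol_name is SSH
    protocol_name="SSH")
```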
5921d77484b0ffde79d1ca9631e163656b37d5f3 | 114 | py | Python | WebEx_Teams/tools/api_key_webex.py | insidus341/devnet | 8d44119b54051dcbc2b894f394e9a2b2d0fee7d8 | ["MIT"] | stars: null | issues: null | forks: null
key = 'Nzk1M2FkODEtY2NiMi00MWJjLWJjZDgtNzMxY2FlZTg2ZGNkZDM0MWMxN2QtOTdh_PF84_1eb65fdf-9643-417f-9974-ad72cae0e10f'
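# NOTE (editorial): the line above is a hardcoded Webex Teams access token
# committed to source control; in practice this belongs in an environment
# variable or a secret store, not in the repository.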
a72e685d91510e37333feb3ab1262dd5d440df2b | 2,309 | py | Python | tests/lib/database.py | cloud-gov/legacy-domain-certificate-renewer | 6b008fdc8e1277cfe4449626e6c488d11fc4857c | ["CC0-1.0"] | stars: 1 (2021-11-16) | issues: 1 (2021-12-22) | forks: null
import pytest
from renewer.db import SessionHandler, cdn_engine, domain_engine
@pytest.fixture(scope="function")
def clean_db():
with SessionHandler() as session:
session.execute("TRUNCATE TABLE user_data", bind=cdn_engine)
session.execute("TRUNCATE TABLE routes CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE operations CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE certificates CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE challenges CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE acme_user_v2 CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE user_data", bind=domain_engine)
session.execute("TRUNCATE TABLE routes CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE operations CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE certificates CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE challenges CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE acme_user_v2 CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE alb_proxies", bind=domain_engine)
session.commit()
session.close()
yield session
session.execute("TRUNCATE TABLE user_data", bind=cdn_engine)
session.execute("TRUNCATE TABLE routes CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE operations CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE certificates CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE challenges CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE acme_user_v2 CASCADE", bind=cdn_engine)
session.execute("TRUNCATE TABLE user_data", bind=domain_engine)
session.execute("TRUNCATE TABLE routes CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE operations CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE certificates CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE challenges CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE acme_user_v2 CASCADE", bind=domain_engine)
session.execute("TRUNCATE TABLE alb_proxies", bind=domain_engine)
session.commit()
session.close()
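The fixture above repeats the same thirteen TRUNCATE statements before and after the `yield`. A minimal refactor sketch (editorial; it assumes the same `renewer.db` module is available) factors the shared list into a helper with identical behavior and ordering:

```python
import pytest
from renewer.db import SessionHandler, cdn_engine, domain_engine

# The six statements run against both engines, in the original's order.
_SHARED_TRUNCATES = [
    "TRUNCATE TABLE user_data",
    "TRUNCATE TABLE routes CASCADE",
    "TRUNCATE TABLE operations CASCADE",
    "TRUNCATE TABLE certificates CASCADE",
    "TRUNCATE TABLE challenges CASCADE",
    "TRUNCATE TABLE acme_user_v2 CASCADE",
]

def _reset_databases(session):
    """Truncate every table the tests touch, then commit and close."""
    for engine in (cdn_engine, domain_engine):
        for statement in _SHARED_TRUNCATES:
            session.execute(statement, bind=engine)
    session.execute("TRUNCATE TABLE alb_proxies", bind=domain_engine)
    session.commit()
    session.close()

@pytest.fixture(scope="function")
def clean_db():
    with SessionHandler() as session:
        _reset_databases(session)  # clean slate before the test
        yield session
        _reset_databases(session)  # and again afterwards
```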
59817d58d6a13765508d6591888faafa9de5fb0d | 85 | py | Python | IPython/external/jsonschema/__init__.py | breisfeld/ipython | 70e2c414014f3323d8a52fbcc94ee9e3a92d5d5f | ["BSD-3-Clause-Clear"] | stars: 26 (2018-02-14 to 2021-08-16) | issues: null | forks: 10 (2018-08-13 to 2020-04-19)
try:
from jsonschema import *
except ImportError:
from _jsonschema import *
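# Editorial note: the fallback above relies on an implicit relative import,
# which only works on Python 2 (presumably this shim predates IPython's
# Python 3 migration). Under Python 3 it would need to be explicit:
#     from ._jsonschema import *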
ab904ba0feba4770c518889ea6646d5d40c4b969 | 143 | py | Python | deploy.py | gaetanV/python | 6f5b38b918c5df2644a3f72606765904980fc2b8 | ["MIT"] | stars: null | issues: null | forks: null
import subprocess
# Use argument lists rather than a single command string so the calls also
# work on POSIX systems, where a plain string is treated as the program name.
subprocess.call(["gcc", "./resolve/horse.c", "-o", "./resolve/horse.exe"])
subprocess.call(["gcc", "./resolve/down.c", "-o", "./resolve/down.exe"])
e6028103c590624651b0e182f176a34685c87460 | 3,801 | py | Python | nfv/nfv-vim/nfv_vim/database/model/_instance_type.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | ["Apache-2.0"] | stars: 2 (2020-02-07 to 2022-02-23) | issues: 1 (2021-01-14) | forks: 2 (2021-01-13 to 2022-02-09)
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from nfv_vim.database.model._base import AsDictMixin
from nfv_vim.database.model._base import Base
class InstanceType_v5(AsDictMixin, Base):
"""
Instance Type Database Table
"""
__tablename__ = 'instance_types_v5'
uuid = Column(String(64), nullable=False, primary_key=True)
name = Column(String(64), nullable=False)
have_details = Column(Boolean, nullable=False)
vcpus = Column(Integer, nullable=False)
mem_mb = Column(Integer, nullable=False)
disk_gb = Column(Integer, nullable=False)
ephemeral_gb = Column(Integer, nullable=False)
swap_gb = Column(Integer, nullable=False)
guest_services = Column(String(2048), nullable=False)
auto_recovery = Column(Boolean, nullable=True)
live_migration_timeout = Column(Integer, nullable=True)
live_migration_max_downtime = Column(Integer, nullable=True)
def __init__(self):
"""
Default some of the settings of the flavor
"""
self.have_details = False
self.vcpus = 0
self.mem_mb = 0
self.disk_gb = 0
self.ephemeral_gb = 0
self.swap_gb = 0
self.guest_services = "{}"
self.auto_recovery = None
self.live_migration_timeout = None
self.live_migration_max_downtime = None
def __repr__(self):
if self.have_details:
return ("<Instance Type(%r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r )>"
% (self.uuid, self.name, self.vcpus, self.mem_mb,
self.disk_gb, self.ephemeral_gb, self.swap_gb,
self.guest_services, self.auto_recovery,
self.live_migration_timeout,
self.live_migration_max_downtime))
return "<Instance Type(%r, %r)>" % (self.uuid, self.name)
class InstanceType(AsDictMixin, Base):
"""
Instance Type Database Table
"""
__tablename__ = 'instance_types_v4'
uuid = Column(String(64), nullable=False, primary_key=True)
name = Column(String(64), nullable=False)
have_details = Column(Boolean, nullable=False)
vcpus = Column(Integer, nullable=False)
mem_mb = Column(Integer, nullable=False)
disk_gb = Column(Integer, nullable=False)
ephemeral_gb = Column(Integer, nullable=False)
swap_gb = Column(Integer, nullable=False)
guest_services = Column(String(2048), nullable=False)
auto_recovery = Column(Boolean, nullable=True)
live_migration_timeout = Column(Integer, nullable=True)
live_migration_max_downtime = Column(Integer, nullable=True)
storage_type = Column(String(128), nullable=True)
def __init__(self):
"""
Default some of the settings of the flavor
"""
self.have_details = False
self.vcpus = 0
self.mem_mb = 0
self.disk_gb = 0
self.ephemeral_gb = 0
self.swap_gb = 0
self.guest_services = "{}"
self.auto_recovery = None
self.live_migration_timeout = None
self.live_migration_max_downtime = None
self.storage_type = None
def __repr__(self):
if self.have_details:
return ("<Instance Type(%r, %r, %r, %r, %r, %r, %r, %r, %r, %r, %r )>"
% (self.uuid, self.name, self.vcpus, self.mem_mb,
self.disk_gb, self.ephemeral_gb, self.swap_gb,
self.guest_services, self.auto_recovery,
self.live_migration_timeout,
self.live_migration_max_downtime))
return "<Instance Type(%r, %r)>" % (self.uuid, self.name)
05102942b27eae37630aaaeb0329a8ea114d6b6a | 74,401 | py | Python | lib/kb_muscle/kb_muscleImpl.py | dcchivian/kb_muscle | 7740e2f273e0c59a0a550708092afa8666d0cb1e | ["MIT"] | stars: 1 (2020-01-13) | issues: 9 (2017-11-09 to 2021-06-23) | forks: 5 (2017-06-25 to 2019-05-13)
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import gzip
import os
import re
import subprocess
import sys
import traceback
import uuid
import json
from datetime import datetime
from pprint import pformat
import requests
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from requests_toolbelt import MultipartEncoder
from installed_clients.AbstractHandleClient import AbstractHandle as HandleService
from installed_clients.DataFileUtilClient import DataFileUtil as DFUClient
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.WorkspaceClient import Workspace as workspaceService
from installed_clients.SetAPIServiceClient import SetAPI
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.kb_ObjectUtilitiesClient import kb_ObjectUtilities
#END_HEADER
class kb_muscle:
'''
Module Name:
kb_muscle
Module Description:
** A KBase module: kb_muscle
**
** This module runs MUSCLE to make MSAs of either DNA or PROTEIN sequences. "MUSCLE nuc" will build nucleotide alignments, even for protein coding genes. "MUSCLE prot" will build protein sequence alignments, and will ignore any features that do not code for proteins.
**
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "1.1.1"
GIT_URL = "https://github.com/kbaseapps/kb_muscle"
GIT_COMMIT_HASH = "d25d4d112be6c3fce5d879734dabdf5cc524ea2f"
#BEGIN_CLASS_HEADER
workspaceURL = None
shockURL = None
handleURL = None
serviceWizardURL = None
callbackURL = None
scratch = None
MUSCLE_bin = '/kb/module/muscle/bin/muscle'
# target is a list for collecting log messages
def log(self, target, message):
# we should do something better here...
if target is not None:
target.append(message)
print(message)
sys.stdout.flush()
def get_single_end_read_library(self, ws_data, ws_info, forward):
pass
def get_feature_set_seqs(self, ws_data, ws_info):
pass
def KBase_data2file_GenomeAnnotation2Fasta(self, ws_data, ws_info):
pass
def get_genome_set_feature_seqs(self, ws_data, ws_info):
pass
# Translation
def TranslateNucToProtSeq(self, ctx, params):
if 'nuc_seq' not in params or params['nuc_seq'] == None or params['nuc_seq'] == '':
raise ValueError('Method TranslateNucToProtSeq() requires nuc_seq parameter')
if 'genetic_code' not in params or params['genetic_code'] == None or params['genetic_code'] == '':
params['genetic_code'] = '11'
if params['genetic_code'] != '11':
raise ValueError('Method TranslateNucToProtSeq() only knows genetic code 11')
nuc_seq = params['nuc_seq'].upper()
prot_seq = ''
genetic_code = params['genetic_code']
genetic_code_table = dict()
genetic_code_table['11'] = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'
}
if genetic_code not in genetic_code_table:
raise ValueError ("genetic code '"+str(genetic_code)+"' not configured in genetic_code_table")
prot_seq = ''.join([genetic_code_table[genetic_code].get(nuc_seq[3*i:3*i+3],'X') for i in range(len(nuc_seq)//3)])
if prot_seq.endswith('_'):
prot_seq = prot_seq.rstrip('_')
return prot_seq
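# Worked example (editorial, not part of the original): with genetic code 11,
# TranslateNucToProtSeq(ctx, {'nuc_seq': 'ATGGCTTAA', 'genetic_code': '11'})
# maps ATG->M, GCT->A, TAA->'_' (stop), strips the trailing stop,
# and returns 'MA'.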
# AMA_METHODS
#def _get_ama_features_as_json (self, features_handle_ref, gff_handle_ref, protein_handle_ref):
def _get_ama_features_as_json (self, features_handle_ref):
this_id = str(uuid.uuid4())
this_scratch_dir = os.path.join (self.scratch, this_id)
json_features_file_path = os.path.join (this_scratch_dir, 'features.json')
#gff_file_path = os.path.join (this_scratch_dir, 'genes.gff')
#protein_file_path = os.path.join (this_scratch_dir, 'protein.fasta')
try:
dfu = DFUClient (self.callbackURL)
except Exception as e:
raise ValueError('Unable to connect to DFU: ' + str(e))
try:
dfu.shock_to_file({'handle_id': features_handle_ref,
'file_path': json_features_file_path+'.gz',
'unpack': 'uncompress'
})
except Exception as e:
raise ValueError('Unable to fetch AnnotatedMetagenomeAssembly features from SHOCK: ' + str(e))
"""
try:
dfu.shock_to_file({'handle_id': gff_handle_ref,
'file_path': gff_file_path+'.gz',
'unpack': 'uncompress'
})
except Exception as e:
raise ValueError('Unable to fetch AnnotatedMetagenomeAssembly gffs from SHOCK: ' + str(e))
try:
dfu.shock_to_file({'handle_id': protein_handle_ref,
'file_path': protein_file_path+'.gz',
'unpack': 'uncompress'
})
except Exception as e:
raise ValueError('Unable to fetch AnnotatedMetagenomeAssembly protein FASTA from SHOCK: ' + str(e))
"""
# DEBUG
"""
print ("SCRATCH CONTENTS")
sys.stdout.flush()
for this_file in os.listdir (this_scratch_dir):
print ("\t"+this_file)
sys.stdout.flush()
buf = []
#with open(json_features_file_path, 'r') as f:
with open(protein_file_path, 'r') as f:
for line in f.readlines():
buf.append (line)
#features_json = json.load(f)
print ("FEATURES_JSON:\n"+"\n".join(buf))
sys.stdout.flush()
"""
with open(json_features_file_path, 'r') as f:
features_json = json.load(f)
os.remove(json_features_file_path+'.gz')
os.remove(json_features_file_path)
#os.remove(gff_file_path+'.gz')
#os.remove(gff_file_path)
#os.remove(protein_file_path+'.gz')
#os.remove(protein_file_path)
return features_json
def _get_features_from_AnnotatedMetagenomeAssembly(self, ctx, ama_ref):
# get ama object
try:
ws = workspaceService(self.workspaceURL, token=ctx['token'])
ama_object = ws.get_objects2({'objects':[{'ref':ama_ref}]})['data'][0]
ama_object_data = ama_object['data']
ama_object_info = ama_object['info']
except Exception as e:
raise ValueError('Unable to fetch AnnotatedMetagenomeAssembly object from workspace: ' + str(e))
#to get the full stack trace: traceback.format_exc()
# get features from json
features_handle_ref = ama_object_data['features_handle_ref']
#gff_handle_ref = ama_object_data['gff_handle_ref']
#protein_handle_ref = ama_object_data['protein_handle_ref']
#features_json = self._get_ama_features_as_json (features_handle_ref, gff_handle_ref, protein_handle_ref)
features_json = self._get_ama_features_as_json (features_handle_ref)
return features_json
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.workspaceURL = config['workspace-url']
self.shockURL = config['shock-url']
self.handleURL = config['handle-service-url']
self.serviceWizardURL = config['srv-wiz-url']
self.callbackURL = os.environ.get('SDK_CALLBACK_URL')
if self.callbackURL == None:
raise ValueError ("SDK_CALLBACK_URL not set in environment")
self.scratch = os.path.abspath(config['scratch'])
# HACK!! temporary hack for issue where megahit fails on mac because of silent named pipe error
#self.host_scratch = self.scratch
#self.scratch = os.path.join('/kb','module','local_scratch')
# end hack
if not os.path.exists(self.scratch):
os.makedirs(self.scratch)
#END_CONSTRUCTOR
pass
def MUSCLE_nuc(self, ctx, params):
"""
Methods for MSA building of either DNA or PROTEIN sequences
**
** overloading as follows:
** input_ref: SingleEndLibrary (just MUSCLE_nuc), FeatureSet (both)
** output_name: MSA
:param params: instance of type "MUSCLE_Params" (MUSCLE Input Params
** ** MUSCLE_prot(): input_ref must be FeatureSet ** MUSCLE_nuc():
input_ref must be FeatureSet, SingleEndLibrary, or AssemblySet) ->
structure: parameter "workspace_name" of type "workspace_name" (**
The workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "desc" of String, parameter "input_ref" of type
"data_obj_ref", parameter "output_name" of type "data_obj_name",
parameter "genome_disp_name_config" of String, parameter
"maxiters" of Long, parameter "maxhours" of Double
:returns: instance of type "MUSCLE_Output" (MUSCLE Output) ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN MUSCLE_nuc
console = []
invalid_msgs = []
self.log(console,'Running MUSCLE_nuc with params=')
self.log(console, "\n"+pformat(params))
report = ''
# report = 'Running MUSCLE_nuc with params='
# report += "\n"+pformat(params)
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I,
WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
row_labels = {}
#### do some basic checks
#
if 'workspace_name' not in params:
raise ValueError('workspace_name parameter is required')
if 'input_ref' not in params:
raise ValueError('input_ref parameter is required')
if 'output_name' not in params:
raise ValueError('output_name parameter is required')
#### Get the input_ref object
##
input_forward_reads_file_compression = None
sequencing_tech = 'N/A'
try:
ws = workspaceService(self.workspaceURL, token=ctx['token'])
objects = ws.get_objects([{'ref': params['input_ref']}])
data = objects[0]['data']
info = objects[0]['info']
input_name = info[1]
input_type_name = info[2].split('.')[1].split('-')[0]
if input_type_name == 'SingleEndLibrary':
input_type_namespace = info[2].split('.')[0]
if input_type_namespace == 'KBaseAssembly':
file_name = data['handle']['file_name']
elif input_type_namespace == 'KBaseFile':
file_name = data['lib']['file']['file_name']
else:
raise ValueError('bad data type namespace: '+input_type_namespace)
#self.log(console, 'INPUT_FILENAME: '+file_name) # DEBUG
if file_name[-3:] == ".gz":
input_forward_reads_file_compression = 'gz'
if 'sequencing_tech' in data:
sequencing_tech = data['sequencing_tech']
except Exception as e:
traceback.format_exc()
raise ValueError('Unable to fetch input_ref object from workspace: ' + str(e))
# Handle overloading (input_ref can be SingleEndLibrary or FeatureSet)
#
if input_type_name == 'SingleEndLibrary':
# DEBUG
#for k in data:
# self.log(console,"SingleEndLibrary ["+k+"]: "+str(data[k]))
try:
if 'lib' in data:
input_forward_reads = data['lib']['file']
elif 'handle' in data:
input_forward_reads = data['handle']
else:
self.log(console,"bad structure for 'input_forward_reads'")
raise ValueError("bad structure for 'input_forward_reads'")
### NOTE: this section is what could be replaced by the transform services
input_forward_reads_file_path = os.path.join(self.scratch,input_forward_reads['file_name'])
input_forward_reads_file_handle = open(input_forward_reads_file_path, 'w')
self.log(console, 'downloading reads file: '+str(input_forward_reads_file_path))
headers = {'Authorization': 'OAuth '+ctx['token']}
r = requests.get(input_forward_reads['url']+'/node/'+input_forward_reads['id']+'?download', stream=True, headers=headers)
for chunk in r.iter_content(1024):
input_forward_reads_file_handle.write(chunk)
input_forward_reads_file_handle.close()
self.log(console, 'done')
### END NOTE
# remove carriage returns
new_file_path = input_forward_reads_file_path+"-CRfree"
new_file_handle = open(new_file_path, 'w')
input_forward_reads_file_handle = open(input_forward_reads_file_path, 'r')
for line in input_forward_reads_file_handle:
line = re.sub("\r","",line)
new_file_handle.write(line)
input_forward_reads_file_handle.close()
new_file_handle.close()
input_forward_reads_file_path = new_file_path
# convert FASTQ to FASTA (if necessary)
new_file_path = input_forward_reads_file_path+".fna"
new_file_handle = open(new_file_path, 'w')
if input_forward_reads_file_compression == 'gz':
input_forward_reads_file_handle = gzip.open(input_forward_reads_file_path, 'r')
else:
input_forward_reads_file_handle = open(input_forward_reads_file_path, 'r')
header = None
last_header = None
last_seq_buf = None
last_line_was_header = False
was_fastq = False
for line in input_forward_reads_file_handle:
if line.startswith('>'):
break
elif line.startswith('@'):
was_fastq = True
header = line[1:]
if last_header != None:
new_file_handle.write('>'+last_header)
new_file_handle.write(last_seq_buf)
last_seq_buf = None
last_header = header
last_line_was_header = True
elif last_line_was_header:
last_seq_buf = line
last_line_was_header = False
else:
continue
if last_header != None:
new_file_handle.write('>'+last_header)
new_file_handle.write(last_seq_buf)
new_file_handle.close()
input_forward_reads_file_handle.close()
if was_fastq:
input_forward_reads_file_path = new_file_path
except Exception as e:
print(traceback.format_exc())
raise ValueError('Unable to download single-end read library files: ' + str(e))
# FeatureSet
#
elif input_type_name == 'FeatureSet':
genome_id_feature_id_delim = '.f:'
# retrieve sequences for features
input_featureSet = data
genomeObjName = {}
genomeObjVer = {}
genomeSciName = {}
genome2Features = {}
new_id = {}
featureSet_elements = input_featureSet['elements']
if 'element_ordering' in input_featureSet and input_featureSet['element_ordering']:
feature_order = input_featureSet['element_ordering']
else:
feature_order = sorted(featureSet_elements.keys())
for fId in feature_order:
genomeRef = featureSet_elements[fId][0]
if genomeRef not in genome2Features:
genome2Features[genomeRef] = []
new_id[genomeRef] = {}
if genome_id_feature_id_delim in fId:
[genome_id, feature_id] = fId.split(genome_id_feature_id_delim)
else:
feature_id = fId
genome2Features[genomeRef].append(feature_id)
this_id = genomeRef + genome_id_feature_id_delim + feature_id
new_id[genomeRef][fId] = this_id
# export features to FASTA file
input_forward_reads_file_path = os.path.join(self.scratch, input_name+".fasta")
self.log(console, 'writing fasta file: '+input_forward_reads_file_path)
records_by_fid = dict()
for genomeRef in genome2Features:
genome_obj = ws.get_objects([{'ref':genomeRef}])[0]
genome_type = re.sub('-[0-9]+\.[0-9]+$', "", genome_obj['info'][TYPE_I])
genomeObjName[genomeRef] = genome_obj['info'][NAME_I]
genomeObjVer[genomeRef] = genome_obj['info'][VERSION_I]
these_genomeFeatureIds = genome2Features[genomeRef]
# Genome
if genome_type == 'KBaseGenomes.Genome':
genome = genome_obj['data']
genomeSciName[genomeRef] = genome['scientific_name']
for feature in genome['features']:
if feature['id'] in these_genomeFeatureIds:
#self.log(console,"kbase_id: '"+feature['id']+"'") # DEBUG
this_id = genomeRef + genome_id_feature_id_delim + feature['id']
short_feature_id = re.sub("^.*\.([^\.]+)\.([^\.]+)$", r"\1.\2", feature['id'])
genome_disp_name = ''
if 'obj_name' in params.get('genome_disp_name_config'):
genome_disp_name += genomeObjName[genomeRef]
if 'ver' in params.get('genome_disp_name_config'):
genome_disp_name += '.v'+str(genomeObjVer[genomeRef])
if genome_type == "KBaseGenomes.Genome" and \
'sci_name' in params.get('genome_disp_name_config'):
genome_disp_name += ': '+genomeSciName[genomeRef]
else:
genome_disp_name = genomeObjName[genomeRef]
row_labels[this_id] = genome_disp_name+' - '+short_feature_id
#record = SeqRecord(Seq(feature['dna_sequence']), id=feature['id'], description=genome['id'])
record = SeqRecord(Seq(feature['dna_sequence']), id=this_id, description=genome['id'])
records_by_fid[this_id] = record
# AnnotatedMetagenomeAssembly
elif genome_type == 'KBaseMetagenomes.AnnotatedMetagenomeAssembly':
ama_features = self._get_features_from_AnnotatedMetagenomeAssembly (ctx, genomeRef)
for feature in ama_features:
if feature['id'] in these_genomeFeatureIds:
if not feature.get('dna_sequence'):
raise ValueError("bad feature "+feature['id']+": No dna_sequence field.")
this_id = genomeRef + genome_id_feature_id_delim + feature['id']
short_feature_id = re.sub("^.*\.([^\.]+)\.([^\.]+)$", r"\1.\2", feature['id'])
genome_disp_name = genomeObjName[genomeRef]
row_labels[this_id] = genome_disp_name+' - '+short_feature_id
record = SeqRecord(Seq(feature['dna_sequence']), id=this_id, description=genomeObjName[genomeRef])
records_by_fid[this_id] = record
else:
raise ValueError ("unable to handle feature from object type: "+genome_type)
records = []
for fId in feature_order:
genomeRef = featureSet_elements[fId][0]
records.append(records_by_fid[new_id[genomeRef][fId]])
SeqIO.write(records, input_forward_reads_file_path, "fasta")
# Missing proper input_input_type
#
else:
raise ValueError('Cannot yet handle input_ref type of: '+input_type_name)
"""
# AssemblySet
#
elif input_type_name == 'AssemblySet':
try:
SetAPI_Client = SetAPI(self.serviceWizardURL, token=ctx['token'])
except Exception as e:
raise ValueError ("unable to instantiate SetAPI Client")
try:
auClient = AssemblyUtil(self.callbackURL, token=ctx['token'])
except Exception as e:
raise ValueError ("unable to instantiate AssemblyUtil Client")
# HERE
"""
### Construct the command
#
# e.g. muscle -in <fasta_in> -out <fasta_out> -maxiters <n> -maxhours <h>
#
muscle_cmd = [self.MUSCLE_bin]
# check for necessary files
if not os.path.isfile(self.MUSCLE_bin):
raise ValueError("no such file '"+self.MUSCLE_bin+"'")
if not os.path.isfile(input_forward_reads_file_path):
raise ValueError("no such file '"+input_forward_reads_file_path+"'")
elif not os.path.getsize(input_forward_reads_file_path) > 0:
raise ValueError("empty file '"+input_forward_reads_file_path+"'")
# set the output path
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)
output_dir = os.path.join(self.scratch,'output.'+str(timestamp))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_aln_file_path = os.path.join(output_dir, params['output_name']+'-MSA.fasta')
file_extension = ''
muscle_cmd.append('-in')
muscle_cmd.append(input_forward_reads_file_path)
muscle_cmd.append('-out')
muscle_cmd.append(output_aln_file_path)
# options
if 'maxiters' in params and params['maxiters'] != None:
muscle_cmd.append('-maxiters')
muscle_cmd.append(str(params['maxiters']))
if 'maxhours' in params and params['maxhours'] != None:
muscle_cmd.append('-maxhours')
muscle_cmd.append(str(params['maxhours']))
# Run MUSCLE, capture output as it happens
#
self.log(console, 'RUNNING MUSCLE:')
self.log(console, ' '+' '.join(muscle_cmd))
# report += "\n"+'running MUSCLE:'+"\n"
# report += ' '+' '.join(muscle_cmd)+"\n"
p = subprocess.Popen(muscle_cmd, \
cwd = self.scratch, \
stdout = subprocess.PIPE, \
stderr = subprocess.STDOUT, \
shell = False)
while True:
line = p.stdout.readline().decode()
if not line: break
self.log(console, line.replace('\n', ''))
p.stdout.close()
p.wait()
self.log(console, 'return code: ' + str(p.returncode))
if p.returncode != 0:
raise ValueError('Error running MUSCLE, return code: '+str(p.returncode) +
'\n\n'+ '\n'.join(console))
# Parse the FASTA MSA output and replace id for txt upload
#
self.log(console, 'PARSING MUSCLE MSA FASTA OUTPUT')
if not os.path.isfile(output_aln_file_path):
raise ValueError("failed to create MUSCLE output: "+output_aln_file_path)
elif not os.path.getsize(output_aln_file_path) > 0:
raise ValueError("created empty file for MUSCLE output: "+output_aln_file_path)
output_aln_file_handle = open (output_aln_file_path, 'r')
output_fasta_buf = []
row_order = []
alignment = {}
alignment_length = None
last_header = None
header = None
last_seq = ''
leading_chars_pattern = re.compile("^\S+")
for line in output_aln_file_handle:
line = line.rstrip('\n')
if line.startswith('>'):
header = line[1:]
if row_labels:
this_id = leading_chars_pattern.findall(header)[0]
this_row_label = re.sub ('\s', '_', row_labels[this_id])
output_fasta_buf.append('>'+this_row_label)
else:
output_fasta_buf.append(line)
if last_header != None:
last_id = leading_chars_pattern.findall(last_header)[0]
row_order.append(last_id)
#self.log(console,"ID: '"+last_id+"'\nALN: '"+last_seq+"'") # DEBUG
#report += last_id+"\t"+last_seq+"\n"
alignment[last_id] = last_seq
if alignment_length == None:
alignment_length = len(last_seq)
elif alignment_length != len(last_seq):
raise ValueError ("unequal alignment row for "+last_header+": '"+last_seq+"'")
last_header = header
last_seq = ''
else:
last_seq += line
output_fasta_buf.append(line)
if last_header != None:
last_id = leading_chars_pattern.findall(last_header)[0]
row_order.append(last_id)
#self.log(console,"ID: '"+last_id+"'\nALN: '"+last_seq+"'") # DEBUG
#report += last_id+"\t"+last_seq+"\n"
alignment[last_id] = last_seq
if alignment_length == None:
alignment_length = len(last_seq)
elif alignment_length != len(last_seq):
raise ValueError ("unequal alignment row for "+last_header+": '"+last_seq+"'")
output_aln_file_handle.close()
# write remapped ids
with open(output_aln_file_path, 'w') as output_aln_file_handle:
output_aln_file_handle.write("\n".join(output_fasta_buf)+"\n")
# load the method provenance from the context object
#
self.log(console,"SETTING PROVENANCE") # DEBUG
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
# add additional info to provenance here, in this case the input data object reference
provenance[0]['input_ws_objects'] = []
provenance[0]['input_ws_objects'].append(params['input_ref'])
provenance[0]['service'] = 'kb_muscle'
provenance[0]['method'] = 'MUSCLE_nuc'
# Upload results
#
if len(invalid_msgs) == 0:
self.log(console,"UPLOADING RESULTS") # DEBUG
MSA_name = params['output_name']
MSA_description = params['desc']
sequence_type = 'dna'
ws_refs = None # may add these later from FeatureSet
kb_refs = None
#alignment_length # already have
#row_order # already have
#alignment # already have
# NO trim_info
# NO alignment_attributes
# NO default_row_labels
# NO parent_msa_ref
# if input_type_name == 'FeatureSet':
# features = featureSet['elements']
# genome2Features = {}
# for fId in row_order:
# genomeRef = features[fId][0]
# if genomeRef not in genome2Features:
# genome2Features[genomeRef] = []
# genome2Features[genomeRef].append(fId)
#
# for genomeRef in genome2Features:
# genome = ws.get_objects([{'ref':genomeRef}])[0]['data']
# these_genomeFeatureIds = genome2Features[genomeRef]
# for feature in genome['features']:
# if feature['id'] in these_genomeFeatureIds:
output_MSA = {
'name': MSA_name,
'description': MSA_description,
'sequence_type': sequence_type,
'alignment_length': alignment_length,
'row_order': row_order,
'alignment': alignment
}
if row_labels:
output_MSA['default_row_labels'] = row_labels
new_obj_info = ws.save_objects({
'workspace': params['workspace_name'],
'objects':[{
'type': 'KBaseTrees.MSA',
'data': output_MSA,
'name': params['output_name'],
'meta': {},
'provenance': provenance
}]
})
# create CLW formatted output file
max_row_width = 60
id_aln_gap_width = 1
gap_chars = ''
for sp_i in range(id_aln_gap_width):
gap_chars += ' '
# DNA
strong_groups = { 'AG': True,
'CTU': True
}
weak_groups = None
# PROTEINS
# strong_groups = { 'AST': True,
# 'EKNQ': True,
# 'HKNQ': True,
# 'DENQ': True,
# 'HKQR': True,
# 'ILMV': True,
# 'FILM': True,
# 'HY': True,
# 'FWY': True
# }
# weak_groups = { 'ACS': True,
# 'ATV': True,
# 'AGS': True,
# 'KNST': True,
# 'APST': True,
# 'DGNS': True,
# 'DEKNQS': True,
# 'DEHKNQ': True,
# 'EHKNQR': True,
# 'FILMV': True,
# 'FHY': True
# }
clw_buf = []
clw_buf.append ('CLUSTALW format of MUSCLE alignment '+MSA_name+': '+MSA_description)
clw_buf.append ('')
long_id_len = 0
aln_pos_by_id = dict()
for row_id in row_order:
aln_pos_by_id[row_id] = 0
if row_labels:
row_id_disp = row_labels[row_id]
else:
row_id_disp = row_id
if long_id_len < len(row_id_disp):
long_id_len = len(row_id_disp)
full_row_cnt = alignment_length // max_row_width
if alignment_length % max_row_width == 0:
full_row_cnt -= 1
for chunk_i in range (full_row_cnt + 1):
for row_id in row_order:
if row_labels:
row_id_disp = re.sub('\s', '_', row_labels[row_id])
else:
row_id_disp = row_id
for sp_i in range (long_id_len-len(row_id_disp)):
row_id_disp += ' '
aln_chunk_upper_bound = (chunk_i+1)*max_row_width
if aln_chunk_upper_bound > alignment_length:
aln_chunk_upper_bound = alignment_length
aln_chunk = alignment[row_id][chunk_i*max_row_width:aln_chunk_upper_bound]
for c in aln_chunk:
if c != '-':
aln_pos_by_id[row_id] += 1
clw_buf.append (row_id_disp+gap_chars+aln_chunk+' '+str(aln_pos_by_id[row_id]))
# conservation line
cons_line = ''
for pos_i in range(chunk_i*max_row_width, aln_chunk_upper_bound):
col_chars = dict()
seq_cnt = 0
for row_id in row_order:
char = alignment[row_id][pos_i]
if char != '-':
seq_cnt += 1
col_chars[char] = True
if seq_cnt <= 1:
cons_char = ' '
elif len(col_chars.keys()) == 1:
cons_char = '*'
else:
strong = False
for strong_group in strong_groups.keys():
this_strong_group = True
for seen_char in col_chars.keys():
if seen_char not in strong_group:
this_strong_group = False
break
if this_strong_group:
strong = True
break
if not strong:
weak = False
if weak_groups is not None:
for weak_group in weak_groups.keys():
this_weak_group = True
for seen_char in col_chars.keys():
if seen_char not in weak_group:
this_weak_group = False
break
if this_weak_group:
weak = True
if strong:
cons_char = ':'
elif weak:
cons_char = '.'
else:
cons_char = ' '
cons_line += cons_char
lead_space = ''
for sp_i in range(long_id_len):
lead_space += ' '
lead_space += gap_chars
clw_buf.append(lead_space+cons_line)
clw_buf.append('')
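# (Editorial note) The conservation symbols emitted above follow CLUSTAL
# conventions: '*' = column identical across all rows, ':' = all residues
# fall within one strong group, '.' = one weak group, ' ' = unconserved.
# For this DNA alignment only the strong groups {'AG', 'CTU'} are defined
# and weak_groups is None, so '.' never appears in MUSCLE_nuc output.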
# write clw to file
clw_buf_str = "\n".join(clw_buf)+"\n"
output_clw_file_path = os.path.join(output_dir, input_name+'-MSA.clw')
with open (output_clw_file_path, 'w') as output_clw_file_handle:
output_clw_file_handle.write(clw_buf_str)
# upload MUSCLE FASTA output to SHOCK for file_links
dfu = DFUClient(self.callbackURL)
try:
output_upload_ret = dfu.file_to_shock({'file_path': output_aln_file_path,
# DEBUG
# 'make_handle': 0,
# 'pack': 'zip'})
'make_handle': 0})
except:
raise ValueError ('error loading aln_out file to shock')
# upload MUSCLE CLW output to SHOCK for file_links
try:
output_clw_upload_ret = dfu.file_to_shock({'file_path': output_clw_file_path,
# DEBUG
# 'make_handle': 0,
# 'pack': 'zip'})
'make_handle': 0})
except:
raise ValueError ('error loading clw_out file to shock')
# make HTML reports
#
# HERE
# build output report object
#
self.log(console,"BUILDING REPORT") # DEBUG
reportName = 'muscle_report_'+str(uuid.uuid4())
reportObj = {
'objects_created':[{'ref':params['workspace_name']+'/'+params['output_name'],
'description':'MUSCLE_nuc MSA'}],
#'message': '',
'message': clw_buf_str,
'file_links': [],
'workspace_name': params['workspace_name'],
'report_object_name': reportName
}
reportObj['file_links'] = [{'shock_id': output_upload_ret['shock_id'],
'name': params['output_name']+'-MUSCLE_nuc.FASTA',
'label': 'MUSCLE_nuc FASTA'
},
{'shock_id': output_clw_upload_ret['shock_id'],
'name': params['output_name']+'-MUSCLE_nuc.CLW',
'label': 'MUSCLE_nuc CLUSTALW'
}]
# save report object
#
SERVICE_VER = 'release'
reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)
#report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})
report_info = reportClient.create_extended_report(reportObj)
else: # len(invalid_msgs) > 0
reportName = 'muscle_report_'+str(uuid.uuid4())
report += "FAILURE:\n\n"+"\n".join(invalid_msgs)+"\n"
reportObj = {
'objects_created':[],
'text_message':report
}
ws = workspaceService(self.workspaceURL, token=ctx['token'])
report_obj_info = ws.save_objects({
#'id':info[6],
'workspace':params['workspace_name'],
'objects':[
{
'type':'KBaseReport.Report',
'data':reportObj,
'name':reportName,
'meta':{},
'hidden':1,
'provenance':provenance
}
]
})[0]
report_info = dict()
report_info['name'] = report_obj_info[1]
report_info['ref'] = str(report_obj_info[6])+'/'+str(report_obj_info[0])+'/'+str(report_obj_info[4])
# done
returnVal = { 'report_name': report_info['name'],
'report_ref': report_info['ref']
}
self.log(console,"MUSCLE_nuc DONE")
#END MUSCLE_nuc
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method MUSCLE_nuc return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def MUSCLE_prot(self, ctx, params):
"""
:param params: instance of type "MUSCLE_Params" (MUSCLE Input Params
** ** MUSCLE_prot(): input_ref must be FeatureSet ** MUSCLE_nuc():
input_ref must be FeatureSet, SingleEndLibrary, or AssemblySet) ->
structure: parameter "workspace_name" of type "workspace_name" (**
The workspace object refs are of form: ** ** objects =
ws.get_objects([{'ref':
params['workspace_id']+'/'+params['obj_name']}]) ** ** "ref" means
the entire name combining the workspace id and the object name **
"id" is a numerical identifier of the workspace or object, and
should just be used for workspace ** "name" is a string identifier
of a workspace or object. This is received from Narrative.),
parameter "desc" of String, parameter "input_ref" of type
"data_obj_ref", parameter "output_name" of type "data_obj_name",
parameter "genome_disp_name_config" of String, parameter
"maxiters" of Long, parameter "maxhours" of Double
:returns: instance of type "MUSCLE_Output" (MUSCLE Output) ->
structure: parameter "report_name" of type "data_obj_name",
parameter "report_ref" of type "data_obj_ref"
"""
# ctx is the context object
# return variables are: returnVal
#BEGIN MUSCLE_prot
console = []
invalid_msgs = []
self.log(console,'Running MUSCLE_prot with params=')
self.log(console, "\n"+pformat(params))
report = ''
# report = 'Running MUSCLE_prot with params='
# report += "\n"+pformat(params)
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I,
WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
row_labels = {}
#### do some basic checks
#
if 'workspace_name' not in params:
raise ValueError('workspace_name parameter is required')
if 'input_ref' not in params:
raise ValueError('input_ref parameter is required')
if 'output_name' not in params:
raise ValueError('output_name parameter is required')
#### Get the input_ref object
##
# input_forward_reads_file_compression = None
# sequencing_tech = 'N/A'
try:
ws = workspaceService(self.workspaceURL, token=ctx['token'])
objects = ws.get_objects([{'ref': params['input_ref']}])
data = objects[0]['data']
info = objects[0]['info']
input_name = info[1]
input_type_name = info[2].split('.')[1].split('-')[0]
# if input_type_name == 'SingleEndLibrary':
# input_type_namespace = info[2].split('.')[0]
# if input_type_namespace == 'KBaseAssembly':
# file_name = data['handle']['file_name']
# elif input_type_namespace == 'KBaseFile':
# file_name = data['lib']['file']['file_name']
# else:
# raise ValueError('bad data type namespace: '+input_type_namespace)
# #self.log(console, 'INPUT_FILENAME: '+file_name) # DEBUG
# if file_name[-3:] == ".gz":
# input_forward_reads_file_compression = 'gz'
# if 'sequencing_tech' in data:
# sequencing_tech = data['sequencing_tech']
except Exception as e:
traceback.format_exc()
raise ValueError('Unable to fetch input_ref object from workspace: ' + str(e))
# Handle overloading (input_name can be SingleEndLibrary or FeatureSet)
#
"""
if input_type_name == 'SingleEndLibrary':
# DEBUG
#for k in data:
# self.log(console,"SingleEndLibrary ["+k+"]: "+str(data[k]))
try:
if 'lib' in data:
input_forward_reads = data['lib']['file']
elif 'handle' in data:
input_forward_reads = data['handle']
else:
self.log(console,"bad structure for 'input_forward_reads'")
raise ValueError("bad structure for 'input_forward_reads'")
### NOTE: this section is what could be replaced by the transform services
input_forward_reads_file_path = os.path.join(self.scratch,input_forward_reads['file_name'])
input_forward_reads_file_handle = open(input_forward_reads_file_path, 'w')
self.log(console, 'downloading reads file: '+str(input_forward_reads_file_path))
headers = {'Authorization': 'OAuth '+ctx['token']}
r = requests.get(input_forward_reads['url']+'/node/'+input_forward_reads['id']+'?download', stream=True, headers=headers)
for chunk in r.iter_content(1024):
input_forward_reads_file_handle.write(chunk)
input_forward_reads_file_handle.close()
self.log(console, 'done')
### END NOTE
# remove carriage returns
new_file_path = input_forward_reads_file_path+"-CRfree"
new_file_handle = open(new_file_path, 'w')
input_forward_reads_file_handle = open(input_forward_reads_file_path, 'r')
for line in input_forward_reads_file_handle:
line = re.sub("\r","",line)
new_file_handle.write(line)
input_forward_reads_file_handle.close()
new_file_handle.close()
input_forward_reads_file_path = new_file_path
# convert FASTQ to FASTA (if necessary)
new_file_path = input_forward_reads_file_path+".fna"
new_file_handle = open(new_file_path, 'w')
if input_forward_reads_file_compression == 'gz':
input_forward_reads_file_handle = gzip.open(input_forward_reads_file_path, 'r')
else:
input_forward_reads_file_handle = open(input_forward_reads_file_path, 'r')
header = None
last_header = None
last_seq_buf = None
last_line_was_header = False
was_fastq = False
for line in input_forward_reads_file_handle:
if line.startswith('>'):
break
elif line.startswith('@'):
was_fastq = True
header = line[1:]
if last_header != None:
new_file_handle.write('>'+last_header)
new_file_handle.write(last_seq_buf)
last_seq_buf = None
last_header = header
last_line_was_header = True
elif last_line_was_header:
last_seq_buf = line
last_line_was_header = False
else:
continue
if last_header != None:
new_file_handle.write('>'+last_header)
new_file_handle.write(last_seq_buf)
new_file_handle.close()
input_forward_reads_file_handle.close()
if was_fastq:
input_forward_reads_file_path = new_file_path
except Exception as e:
print(traceback.format_exc())
raise ValueError('Unable to download single-end read library files: ' + str(e))
"""
# FeatureSet
#
# elif input_type_name == 'FeatureSet':
if input_type_name == 'FeatureSet':
genome_id_feature_id_delim = '.f:'
# retrieve sequences for features
input_featureSet = data
genomeObjName = {}
genomeObjVer = {}
genomeSciName = {}
genome2Features = {}
new_id = {}
featureSet_elements = input_featureSet['elements']
if 'element_ordering' in input_featureSet and input_featureSet['element_ordering']:
feature_order = input_featureSet['element_ordering']
else:
feature_order = sorted(featureSet_elements.keys())
for fId in feature_order:
genomeRef = featureSet_elements[fId][0]
if genomeRef not in genome2Features:
genome2Features[genomeRef] = []
new_id[genomeRef] = {}
if genome_id_feature_id_delim in fId:
[genome_id, feature_id] = fId.split(genome_id_feature_id_delim)
else:
feature_id = fId
genome2Features[genomeRef].append(feature_id)
this_id = genomeRef + genome_id_feature_id_delim + feature_id
new_id[genomeRef][fId] = this_id
# export features to FASTA file
input_forward_reads_file_path = os.path.join(self.scratch, input_name+".fasta")
self.log(console, 'writing fasta file: '+input_forward_reads_file_path)
records_by_fid = dict()
proteins_found = 0
for genomeRef in genome2Features:
genome_obj = ws.get_objects([{'ref':genomeRef}])[0]
genome_type = re.sub('-[0-9]+\.[0-9]+$', "", genome_obj['info'][TYPE_I])
genomeObjName[genomeRef] = genome_obj['info'][NAME_I]
genomeObjVer[genomeRef] = genome_obj['info'][VERSION_I]
these_genomeFeatureIds = genome2Features[genomeRef]
# Genome
if genome_type == 'KBaseGenomes.Genome':
genome = genome_obj['data']
genomeSciName[genomeRef] = genome['scientific_name']
for feature in genome['features']:
if feature['id'] in these_genomeFeatureIds:
if 'protein_translation' not in feature or feature['protein_translation'] == None:
self.log(invalid_msgs,"bad CDS Feature "+feature['id']+": no protein_translation found")
continue
else:
#self.log(console,"kbase_id: '"+feature['id']+"'") # DEBUG
this_id = genomeRef + genome_id_feature_id_delim + feature['id']
this_id = re.sub ('\s', '_', this_id)
short_feature_id = re.sub("^.*\.([^\.]+)\.([^\.]+)$", r"\1.\2", feature['id'])
genome_disp_name = ''
if 'obj_name' in params.get('genome_disp_name_config'):
genome_disp_name += genomeObjName[genomeRef]
if 'ver' in params.get('genome_disp_name_config'):
genome_disp_name += '.v'+str(genomeObjVer[genomeRef])
if genome_type == "KBaseGenomes.Genome" and \
'sci_name' in params.get('genome_disp_name_config'):
genome_disp_name += ': '+genomeSciName[genomeRef]
else:
genome_disp_name = genomeObjName[genomeRef]
row_labels[this_id] = genome_disp_name+' - '+short_feature_id
#record = SeqRecord(Seq(feature['protein_translation']), id=feature['id'], description=genome['id'])
record = SeqRecord(Seq(feature['protein_translation']), id=this_id, description=genome['id'])
proteins_found += 1
records_by_fid[this_id] = record
# AnnotatedMetagenomeAssembly
elif genome_type == 'KBaseMetagenomes.AnnotatedMetagenomeAssembly':
ama_features = self._get_features_from_AnnotatedMetagenomeAssembly (ctx, genomeRef)
for feature in ama_features:
if feature['id'] in these_genomeFeatureIds:
if not feature.get('type'):
raise ValueError ("No type for AMA feature "+feature['id'])
if feature['type'] != 'CDS':
self.log(console, "skipping non-CDS AMA feature "+feature['id'])
continue
if not feature.get('protein_translation'):
self.log(console, "AMA CDS Feature "+feature['id']+": no protein_translation found. Auto-translating from dna_sequence")
prot_translation = self.TranslateNucToProtSeq(ctx,
{'nuc_seq': feature['dna_sequence'],
'genetic_code': '11'})
else:
prot_translation = feature['protein_translation']
this_id = genomeRef + genome_id_feature_id_delim + feature['id']
short_feature_id = re.sub(r"^.*\.([^\.]+)\.([^\.]+)$", r"\1.\2", feature['id'])
genome_disp_name = genomeObjName[genomeRef]
row_labels[this_id] = genome_disp_name+' - '+short_feature_id
record = SeqRecord(Seq(prot_translation), id=this_id, description=genomeObjName[genomeRef])
proteins_found += 1
records_by_fid[this_id] = record
if proteins_found < 2:
self.log(invalid_msgs, "Fewer than 2 protein Features (CDS) found. Exiting...")
else:
records = []
for fId in feature_order:
genomeRef = featureSet_elements[fId][0]
records.append(records_by_fid[new_id[genomeRef][fId]])
SeqIO.write(records, input_forward_reads_file_path, "fasta")
# Missing proper input_input_type
#
else:
raise ValueError('Cannot yet handle input_ref type of: '+input_type_name)
### Construct the command
#
# e.g. muscle -in <fasta_in> -out <fasta_out> -maxiters <n> -maxhours <h>
#
if len(invalid_msgs) == 0:
muscle_cmd = [self.MUSCLE_bin]
# check for necessary files
if not os.path.isfile(self.MUSCLE_bin):
raise ValueError("no such file '"+self.MUSCLE_bin+"'")
if not os.path.isfile(input_forward_reads_file_path):
raise ValueError("no such file '"+input_forward_reads_file_path+"'")
elif not os.path.getsize(input_forward_reads_file_path) > 0:
raise ValueError("empty file '"+input_forward_reads_file_path+"'. Perhaps no protein-coding genes were provided?")
# set the output path
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)
output_dir = os.path.join(self.scratch,'output.'+str(timestamp))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_aln_file_path = os.path.join(output_dir, params['output_name']+'-MSA.fasta')
file_extension = ''
muscle_cmd.append('-in')
muscle_cmd.append(input_forward_reads_file_path)
muscle_cmd.append('-out')
muscle_cmd.append(output_aln_file_path)
# options
if 'maxiters' in params and params['maxiters'] is not None:
muscle_cmd.append('-maxiters')
muscle_cmd.append(str(params['maxiters']))
if 'maxhours' in params and params['maxhours'] is not None:
muscle_cmd.append('-maxhours')
muscle_cmd.append(str(params['maxhours']))
# Run MUSCLE, capture output as it happens
#
self.log(console, 'RUNNING MUSCLE:')
self.log(console, ' '+' '.join(muscle_cmd))
# report += "\n"+'running MUSCLE:'+"\n"
# report += ' '+' '.join(muscle_cmd)+"\n"
p = subprocess.Popen(muscle_cmd, \
cwd = self.scratch, \
stdout = subprocess.PIPE, \
stderr = subprocess.STDOUT, \
shell = False)
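# Stream MUSCLE's combined stdout/stderr into the console log as it runs.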
while True:
line = p.stdout.readline().decode()
if not line: break
self.log(console, line.replace('\n', ''))
p.stdout.close()
p.wait()
self.log(console, 'return code: ' + str(p.returncode))
if p.returncode != 0:
raise ValueError('Error running MUSCLE, return code: '+str(p.returncode) +
'\n\n'+ '\n'.join(console))
# Parse the FASTA MSA output
#
self.log(console, 'PARSING MUSCLE MSA FASTA OUTPUT')
if not os.path.isfile(output_aln_file_path):
raise ValueError("failed to create MUSCLE output: "+output_aln_file_path)
elif not os.path.getsize(output_aln_file_path) > 0:
raise ValueError("created empty file for MUSCLE output: "+output_aln_file_path)
output_aln_file_handle = open (output_aln_file_path, 'r')
output_fasta_buf = []
row_order = []
alignment = {}
alignment_length = None
last_header = None
header = None
last_seq = ''
leading_chars_pattern = re.compile(r"^\S+")
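# Parse the (possibly line-wrapped) FASTA MSA: sequence lines are
# concatenated until the next '>' header, each record id is the first
# whitespace-delimited token of its header, and all rows must share the
# same aligned length.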
for line in output_aln_file_handle:
line = line.rstrip('\n')
if line.startswith('>'):
header = line[1:]
if row_labels:
this_id = leading_chars_pattern.findall(header)[0]
this_row_label = re.sub(r'\s', '_', row_labels[this_id])
output_fasta_buf.append('>'+this_row_label)
else:
output_fasta_buf.append(line)
if last_header is not None:
last_id = leading_chars_pattern.findall(last_header)[0]
row_order.append(last_id)
#self.log(console,"ID: '"+last_id+"'\nALN: '"+last_seq+"'") # DEBUG
#report += last_id+"\t"+last_seq+"\n"
alignment[last_id] = last_seq
if alignment_length is None:
alignment_length = len(last_seq)
elif alignment_length != len(last_seq):
raise ValueError("unequal alignment row for "+last_header+": '"+last_seq+"'")
last_header = header
last_seq = ''
else:
last_seq += line
output_fasta_buf.append(line)
if last_header is not None:
last_id = leading_chars_pattern.findall(last_header)[0]
row_order.append(last_id)
#self.log(console,"ID: '"+last_id+"'\nALN: '"+last_seq+"'") # DEBUG
#report += last_id+"\t"+last_seq+"\n"
alignment[last_id] = last_seq
if alignment_length is None:
alignment_length = len(last_seq)
elif alignment_length != len(last_seq):
raise ValueError("unequal alignment row for "+last_header+": '"+last_seq+"'")
output_aln_file_handle.close()
# write remapped ids
with open(output_aln_file_path, 'w') as output_aln_file_handle:
output_aln_file_handle.write("\n".join(output_fasta_buf)+"\n")
# load the method provenance from the context object
#
self.log(console,"SETTING PROVENANCE") # DEBUG
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
# add additional info to provenance here, in this case the input data object reference
provenance[0]['input_ws_objects'] = []
provenance[0]['input_ws_objects'].append(params['input_ref'])
provenance[0]['service'] = 'kb_muscle'
provenance[0]['method'] = 'MUSCLE_prot'
# Upload results
#
if len(invalid_msgs) == 0:
self.log(console,"UPLOADING RESULTS") # DEBUG
MSA_name = params['output_name']
MSA_description = params['desc']
sequence_type = 'protein'
ws_refs = None # may add these later from FeatureSet
kb_refs = None
# alignment_length # already have
# row_order # already have
# alignment # already have
# NO trim_info
# NO alignment_attributes
# NO default_row_labels
# NO parent_msa_ref
# if input_type_name == 'FeatureSet':
# features = featureSet['elements']
# genome2Features = {}
# for fId in row_order:
# genomeRef = features[fId][0]
# if genomeRef not in genome2Features:
# genome2Features[genomeRef] = []
# genome2Features[genomeRef].append(fId)
#
# for genomeRef in genome2Features:
# genome = ws.get_objects([{'ref':genomeRef}])[0]['data']
# these_genomeFeatureIds = genome2Features[genomeRef]
# for feature in genome['features']:
# if feature['id'] in these_genomeFeatureIds:
output_MSA = {
'name': MSA_name,
'description': MSA_description,
'sequence_type': sequence_type,
'alignment_length': alignment_length,
'row_order': row_order,
'alignment': alignment
}
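# default_row_labels (attached below when available) lets MSA viewers show
# the human-readable '<genome> - <feature>' names instead of composite ids.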
if row_labels:
output_MSA['default_row_labels'] = row_labels
new_obj_info = ws.save_objects({
'workspace': params['workspace_name'],
'objects':[{
'type': 'KBaseTrees.MSA',
'data': output_MSA,
'name': params['output_name'],
'meta': {},
'provenance': provenance
}]
})
# create CLW formatted output file
max_row_width = 60
id_aln_gap_width = 1
gap_chars = ''
for sp_i in range(id_aln_gap_width):
gap_chars += ' '
# DNA
# strong_groups = { 'AG': True,
# 'CTU': True
# }
# weak_groups = None
# PROTEINS
strong_groups = { 'AST': True,
'EKNQ': True,
'HKNQ': True,
'DENQ': True,
'HKQR': True,
'ILMV': True,
'FILM': True,
'HY': True,
'FWY': True
}
weak_groups = { 'ACS': True,
'ATV': True,
'AGS': True,
'KNST': True,
'APST': True,
'DGNS': True,
'DEKNQS': True,
'DEHKNQ': True,
'EHKNQR': True,
'FILMV': True,
'FHY': True
}
clw_buf = []
clw_buf.append ('CLUSTALW format of MUSCLE alignment '+MSA_name+': '+MSA_description)
clw_buf.append ('')
long_id_len = 0
aln_pos_by_id = dict()
for row_id in row_order:
aln_pos_by_id[row_id] = 0
if row_labels:
row_id_disp = row_labels[row_id]
else:
row_id_disp = row_id
if long_id_len < len(row_id_disp):
long_id_len = len(row_id_disp)
full_row_cnt = alignment_length // max_row_width
if alignment_length % max_row_width == 0:
full_row_cnt -= 1
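# full_row_cnt is the index of the last chunk, i.e.
# ceil(alignment_length / max_row_width) - 1, so the range below visits
# every max_row_width-column block of the alignment exactly once.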
for chunk_i in range (full_row_cnt + 1):
for row_id in row_order:
if row_labels:
row_id_disp = re.sub(r'\s', '_', row_labels[row_id])
else:
row_id_disp = row_id
for sp_i in range (long_id_len-len(row_id_disp)):
row_id_disp += ' '
aln_chunk_upper_bound = (chunk_i+1)*max_row_width
if aln_chunk_upper_bound > alignment_length:
aln_chunk_upper_bound = alignment_length
aln_chunk = alignment[row_id][chunk_i*max_row_width:aln_chunk_upper_bound]
for c in aln_chunk:
if c != '-':
aln_pos_by_id[row_id] += 1
clw_buf.append (row_id_disp+gap_chars+aln_chunk+' '+str(aln_pos_by_id[row_id]))
# conservation line
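# CLUSTAL-style conservation symbols: '*' = column fully conserved,
# ':' = all residues fall in one "strong" group, '.' = one "weak" group.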
cons_line = ''
for pos_i in range(chunk_i*max_row_width, aln_chunk_upper_bound):
col_chars = dict()
seq_cnt = 0
for row_id in row_order:
char = alignment[row_id][pos_i]
if char != '-':
seq_cnt += 1
col_chars[char] = True
if seq_cnt <= 1:
cons_char = ' '
elif len(col_chars.keys()) == 1:
cons_char = '*'
else:
strong = False
for strong_group in strong_groups.keys():
this_strong_group = True
for seen_char in col_chars.keys():
if seen_char not in strong_group:
this_strong_group = False
break
if this_strong_group:
strong = True
break
if not strong:
weak = False
if weak_groups is not None:
for weak_group in weak_groups.keys():
this_weak_group = True
for seen_char in col_chars.keys():
if seen_char not in weak_group:
this_weak_group = False
break
if this_weak_group:
weak = True
break
if strong:
cons_char = ':'
elif weak:
cons_char = '.'
else:
cons_char = ' '
cons_line += cons_char
lead_space = ''
for sp_i in range(long_id_len):
lead_space += ' '
lead_space += gap_chars
clw_buf.append(lead_space+cons_line)
clw_buf.append('')
# write clw to file
clw_buf_str = "\n".join(clw_buf)+"\n"
output_clw_file_path = os.path.join(output_dir, input_name+'-MSA.clw')
with open (output_clw_file_path, 'w') as output_clw_file_handle:
output_clw_file_handle.write(clw_buf_str)
# upload MUSCLE FASTA output to SHOCK for file_links
dfu = DFUClient(self.callbackURL)
try:
output_upload_ret = dfu.file_to_shock({'file_path': output_aln_file_path,
# DEBUG
# 'make_handle': 0,
# 'pack': 'zip'})
'make_handle': 0})
except Exception as e:
raise ValueError('error loading aln_out file to shock: '+str(e))
# upload MUSCLE CLW output to SHOCK for file_links
try:
output_clw_upload_ret = dfu.file_to_shock({'file_path': output_clw_file_path,
# DEBUG
# 'make_handle': 0,
# 'pack': 'zip'})
'make_handle': 0})
except Exception as e:
raise ValueError('error loading clw_out file to shock: '+str(e))
# make HTML reports
# (HTML report generation not implemented here)
# build output report object
#
self.log(console,"BUILDING REPORT") # DEBUG
reportName = 'muscle_report_'+str(uuid.uuid4())
reportObj = {
'objects_created':[{'ref':params['workspace_name']+'/'+params['output_name'],
'description':'MUSCLE_prot MSA'}],
#'message': '',
'message': clw_buf_str,
'file_links': [],
'workspace_name': params['workspace_name'],
'report_object_name': reportName
}
reportObj['file_links'] = [{'shock_id': output_upload_ret['shock_id'],
'name': params['output_name']+'-MUSCLE_prot.FASTA',
'label': 'MUSCLE_prot FASTA'
},
{'shock_id': output_clw_upload_ret['shock_id'],
'name': params['output_name']+'-MUSCLE_prot.CLW',
'label': 'MUSCLE_prot CLUSTALW'
}]
# save report object
#
SERVICE_VER = 'release'
reportClient = KBaseReport(self.callbackURL, token=ctx['token'], service_ver=SERVICE_VER)
#report_info = report.create({'report':reportObj, 'workspace_name':params['workspace_name']})
report_info = reportClient.create_extended_report(reportObj)
else: # len(invalid_msgs) > 0
reportName = 'muscle_report_'+str(uuid.uuid4())
report += "FAILURE:\n\n"+"\n".join(invalid_msgs)+"\n"
reportObj = {
'objects_created':[],
'text_message':report
}
ws = workspaceService(self.workspaceURL, token=ctx['token'])
report_obj_info = ws.save_objects({
#'id':info[6],
'workspace':params['workspace_name'],
'objects':[
{
'type':'KBaseReport.Report',
'data':reportObj,
'name':reportName,
'meta':{},
'hidden':1,
'provenance':provenance
}
]
})[0]
report_info = dict()
report_info['name'] = report_obj_info[1]
report_info['ref'] = str(report_obj_info[6])+'/'+str(report_obj_info[0])+'/'+str(report_obj_info[4])
# done
returnVal = { 'report_name': report_info['name'],
'report_ref': report_info['ref']
}
self.log(console,"MUSCLE_prot DONE")
#END MUSCLE_prot
# At some point might do deeper type checking...
if not isinstance(returnVal, dict):
raise ValueError('Method MUSCLE_prot return value ' +
'returnVal is not type dict as required.')
# return the results
return [returnVal]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
| 44.445042
| 269
| 0.505477
| 7,493
| 74,401
| 4.743627
| 0.085947
| 0.021157
| 0.036349
| 0.037812
| 0.858457
| 0.844643
| 0.835781
| 0.833699
| 0.822305
| 0.8171
| 0
| 0.004913
| 0.395371
| 74,401
| 1,673
| 270
| 44.471608
| 0.785217
| 0.128493
| 0
| 0.75223
| 0
| 0
| 0.115648
| 0.009579
| 0.001982
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004955
| 0.021804
| null | null | 0.002973
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
053a40632322ffefdade2af9e0287247c26efb67
| 9,361
|
py
|
Python
|
tests/test_PIHandlerODBC.py
|
g-parki/tagreader-python
|
dba867d0ac1e05166e5b0cc3e42557264280727f
|
[
"MIT"
] | 23
|
2020-04-16T17:23:33.000Z
|
2022-03-31T21:44:10.000Z
|
tests/test_PIHandlerODBC.py
|
g-parki/tagreader-python
|
dba867d0ac1e05166e5b0cc3e42557264280727f
|
[
"MIT"
] | 62
|
2020-05-27T11:25:23.000Z
|
2022-03-11T07:03:48.000Z
|
tests/test_PIHandlerODBC.py
|
g-parki/tagreader-python
|
dba867d0ac1e05166e5b0cc3e42557264280727f
|
[
"MIT"
] | 10
|
2020-08-18T08:24:27.000Z
|
2022-03-08T20:53:59.000Z
|
import pytest
import pandas as pd
from tagreader import utils
from tagreader.utils import ReaderType
START_TIME = "2018-01-17 16:00:00"
STOP_TIME = "2018-01-17 17:00:00"
SAMPLE_TIME = 60
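# The fixture below targets a dummy host/port; these tests only assert on
# the generated query strings, so they should run without a live ODBC
# connection.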
@pytest.fixture(scope="module")
def PIHandler():
from tagreader.odbc_handlers import PIHandlerODBC
yield PIHandlerODBC(
"thehostname.statoil.net", 1234, options={"das_server": "the_das_server"}
)
def test_generate_connection_string(PIHandler):
res = PIHandler.generate_connection_string()
expected = (
"DRIVER={PI ODBC Driver};Server=the_das_server;Trusted_Connection=Yes;"
"Command Timeout=1800;Provider Type=PIOLEDB;"
"Provider String={Data source=thehostname;Integrated_Security=SSPI;"
"Time Zone=UTC};"
)
assert expected == res
@pytest.mark.parametrize(
"read_type",
[
"RAW",
# pytest.param(
# "SHAPEPRESERVING", marks=pytest.mark.skip(reason="Not implemented")
# ),
"INT",
"MIN",
"MAX",
"RNG",
"AVG",
"STD",
"VAR",
# pytest.param("COUNT", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("GOOD", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("BAD", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("TOTAL", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("SUM", marks=pytest.mark.skip(reason="Not implemented")),
"SNAPSHOT",
],
)
def test_generate_tag_read_query(PIHandler, read_type):
starttime = utils.ensure_datetime_with_tz(START_TIME)
stoptime = utils.ensure_datetime_with_tz(STOP_TIME)
ts = pd.Timedelta(SAMPLE_TIME, unit="s")
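# SNAPSHOT reads ignore the time window and sample time, so the query is
# generated with those arguments set to None.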
if read_type == "SNAPSHOT":
res = PIHandler.generate_read_query(
"thetag", None, None, None, getattr(ReaderType, read_type)
)
else:
res = PIHandler.generate_read_query(
"thetag", starttime, stoptime, ts, getattr(ReaderType, read_type)
)
expected = {
"RAW": (
"SELECT TOP 100000 CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[picomp2] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"ORDER BY time"
),
"INT": (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[piinterp2] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"MIN": (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[pimin] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"MAX": (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[pimax] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"RNG": (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[pirange] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"AVG": (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[piavg] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"STD": (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[pistd] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"VAR": (
"SELECT POWER(CAST(value as FLOAT32), 2) AS value, time "
"FROM [piarchive]..[pistd] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"SNAPSHOT": (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[pisnapshot] WHERE tag='thetag'"
),
}
assert expected[read_type] == res
@pytest.mark.parametrize(
"read_type",
[
"RAW",
# pytest.param(
# "SHAPEPRESERVING", marks=pytest.mark.skip(reason="Not implemented")
# ),
"INT",
"MIN",
"MAX",
"RNG",
"AVG",
"STD",
"VAR",
# pytest.param("COUNT", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("GOOD", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("BAD", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("TOTAL", marks=pytest.mark.skip(reason="Not implemented")),
# pytest.param("SUM", marks=pytest.mark.skip(reason="Not implemented")),
"SNAPSHOT",
],
)
def test_generate_tag_read_query_with_status(PIHandler, read_type):
starttime = utils.ensure_datetime_with_tz(START_TIME)
stoptime = utils.ensure_datetime_with_tz(STOP_TIME)
ts = pd.Timedelta(SAMPLE_TIME, unit="s")
if read_type == "SNAPSHOT":
res = PIHandler.generate_read_query(
"thetag", None, None, None, getattr(ReaderType, read_type), get_status=True
)
else:
res = PIHandler.generate_read_query(
"thetag",
starttime,
stoptime,
ts,
getattr(ReaderType, read_type),
get_status=True,
)
expected = {
"RAW": (
"SELECT TOP 100000 CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[picomp2] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"ORDER BY time"
),
"INT": (
"SELECT CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[piinterp2] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"MIN": (
"SELECT CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[pimin] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"MAX": (
"SELECT CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[pimax] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"RNG": (
"SELECT CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[pirange] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"AVG": (
"SELECT CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[piavg] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"STD": (
"SELECT CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[pistd] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"VAR": (
"SELECT POWER(CAST(value as FLOAT32), 2) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[pistd] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '60s') ORDER BY time"
),
"SNAPSHOT": (
"SELECT CAST(value as FLOAT32) AS value, "
"status, questionable, substituted, time "
"FROM [piarchive]..[pisnapshot] WHERE tag='thetag'"
),
}
assert expected[read_type] == res
def test_genreadquery_long_sampletime(PIHandler):
starttime = utils.ensure_datetime_with_tz(START_TIME)
stoptime = utils.ensure_datetime_with_tz(STOP_TIME)
ts = pd.Timedelta(86401, unit="s")
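# A sample time longer than one day should still be emitted verbatim in
# seconds ('86401s'), not converted to a day-based timestep.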
res = PIHandler.generate_read_query(
"thetag", starttime, stoptime, ts, ReaderType.INT
)
expected = (
"SELECT CAST(value as FLOAT32) AS value, time "
"FROM [piarchive]..[piinterp2] WHERE tag='thetag' "
"AND (time BETWEEN '17-Jan-18 15:00:00' AND '17-Jan-18 16:00:00') "
"AND (timestep = '86401s') ORDER BY time"
)
assert expected == res
| 37.745968
| 87
| 0.557312
| 1,125
| 9,361
| 4.567111
| 0.122667
| 0.028026
| 0.046322
| 0.066563
| 0.869599
| 0.869599
| 0.869599
| 0.864539
| 0.864539
| 0.854029
| 0
| 0.071939
| 0.297618
| 9,361
| 247
| 88
| 37.898785
| 0.709506
| 0.095823
| 0
| 0.759434
| 0
| 0.080189
| 0.5029
| 0.041553
| 0
| 0
| 0
| 0
| 0.018868
| 1
| 0.023585
| false
| 0
| 0.023585
| 0
| 0.04717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|