hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aabc3f70d6e617954f018345ae4877663f51d1cf
| 35,236
|
py
|
Python
|
sdk/python/pulumi_oci/meteringcomputation/usage.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/meteringcomputation/usage.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/meteringcomputation/usage.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['UsageArgs', 'Usage']
@pulumi.input_type
class UsageArgs:
    # Input-argument bag for the Usage resource. NOTE: @pulumi.input_type
    # introspects the property getters/setters below, so getter names and the
    # keys passed to pulumi.get/pulumi.set must stay exactly in sync.
    def __init__(__self__, *,
                 granularity: pulumi.Input[str],
                 tenant_id: pulumi.Input[str],
                 time_usage_ended: pulumi.Input[str],
                 time_usage_started: pulumi.Input[str],
                 compartment_depth: Optional[pulumi.Input[float]] = None,
                 filter: Optional[pulumi.Input[str]] = None,
                 forecast: Optional[pulumi.Input['UsageForecastArgs']] = None,
                 group_bies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 group_by_tags: Optional[pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]]] = None,
                 is_aggregate_by_time: Optional[pulumi.Input[bool]] = None,
                 query_type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Usage resource.
        :param pulumi.Input[str] granularity: The usage granularity. HOURLY - Hourly data aggregation. DAILY - Daily data aggregation. MONTHLY - Monthly data aggregation. TOTAL - Not yet supported.
        :param pulumi.Input[str] tenant_id: Tenant ID.
        :param pulumi.Input[str] time_usage_ended: The usage end time.
        :param pulumi.Input[str] time_usage_started: The usage start time.
        :param pulumi.Input[float] compartment_depth: The compartment depth level.
        :param pulumi.Input[str] filter: The usage filter (undocumented in the generated schema — presumably the Usage API filter object; confirm against the OCI Usage API).
        :param pulumi.Input['UsageForecastArgs'] forecast: Forecast configuration of usage/cost.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] group_bies: Aggregate the result by. example: `["tagNamespace", "tagKey", "tagValue", "service", "skuName", "skuPartNumber", "unit", "compartmentName", "compartmentPath", "compartmentId", "platform", "region", "logicalAd", "resourceId", "tenantId", "tenantName"]`
        :param pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]] group_by_tags: GroupBy a specific tagKey. Provide the tagNamespace and tagKey in the tag object. Only supports one tag in the list. For example: `[{"namespace":"oracle", "key":"createdBy"}]`
        :param pulumi.Input[bool] is_aggregate_by_time: Whether aggregated by time. If isAggregateByTime is true, all usage/cost over the query time period will be added up.
        :param pulumi.Input[str] query_type: The query usage type. COST by default if it is missing. Usage - Query the usage data. Cost - Query the cost/billing data. Credit - Query the credit adjustments data. ExpiredCredit - Query the expired credits data. AllCredit - Query the credit adjustments and expired credit.
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "granularity", granularity)
        pulumi.set(__self__, "tenant_id", tenant_id)
        pulumi.set(__self__, "time_usage_ended", time_usage_ended)
        pulumi.set(__self__, "time_usage_started", time_usage_started)
        # Optional arguments are only recorded when supplied, so that unset
        # properties stay absent rather than being sent as None.
        if compartment_depth is not None:
            pulumi.set(__self__, "compartment_depth", compartment_depth)
        if filter is not None:
            pulumi.set(__self__, "filter", filter)
        if forecast is not None:
            pulumi.set(__self__, "forecast", forecast)
        if group_bies is not None:
            pulumi.set(__self__, "group_bies", group_bies)
        if group_by_tags is not None:
            pulumi.set(__self__, "group_by_tags", group_by_tags)
        if is_aggregate_by_time is not None:
            pulumi.set(__self__, "is_aggregate_by_time", is_aggregate_by_time)
        if query_type is not None:
            pulumi.set(__self__, "query_type", query_type)
    @property
    @pulumi.getter
    def granularity(self) -> pulumi.Input[str]:
        """
        The usage granularity. HOURLY - Hourly data aggregation. DAILY - Daily data aggregation. MONTHLY - Monthly data aggregation. TOTAL - Not yet supported.
        """
        return pulumi.get(self, "granularity")
    @granularity.setter
    def granularity(self, value: pulumi.Input[str]):
        pulumi.set(self, "granularity", value)
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Input[str]:
        """
        Tenant ID.
        """
        return pulumi.get(self, "tenant_id")
    @tenant_id.setter
    def tenant_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "tenant_id", value)
    @property
    @pulumi.getter(name="timeUsageEnded")
    def time_usage_ended(self) -> pulumi.Input[str]:
        """
        The usage end time.
        """
        return pulumi.get(self, "time_usage_ended")
    @time_usage_ended.setter
    def time_usage_ended(self, value: pulumi.Input[str]):
        pulumi.set(self, "time_usage_ended", value)
    @property
    @pulumi.getter(name="timeUsageStarted")
    def time_usage_started(self) -> pulumi.Input[str]:
        """
        The usage start time.
        """
        return pulumi.get(self, "time_usage_started")
    @time_usage_started.setter
    def time_usage_started(self, value: pulumi.Input[str]):
        pulumi.set(self, "time_usage_started", value)
    @property
    @pulumi.getter(name="compartmentDepth")
    def compartment_depth(self) -> Optional[pulumi.Input[float]]:
        """
        The compartment depth level.
        """
        return pulumi.get(self, "compartment_depth")
    @compartment_depth.setter
    def compartment_depth(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "compartment_depth", value)
    @property
    @pulumi.getter
    def filter(self) -> Optional[pulumi.Input[str]]:
        """
        The usage filter (undocumented in the generated schema — confirm against the OCI Usage API).
        """
        # NOTE: the generated name shadows the `filter` builtin; it cannot be
        # renamed without breaking the codegen-defined interface.
        return pulumi.get(self, "filter")
    @filter.setter
    def filter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filter", value)
    @property
    @pulumi.getter
    def forecast(self) -> Optional[pulumi.Input['UsageForecastArgs']]:
        """
        Forecast configuration of usage/cost.
        """
        return pulumi.get(self, "forecast")
    @forecast.setter
    def forecast(self, value: Optional[pulumi.Input['UsageForecastArgs']]):
        pulumi.set(self, "forecast", value)
    @property
    @pulumi.getter(name="groupBies")
    def group_bies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Aggregate the result by. example: `["tagNamespace", "tagKey", "tagValue", "service", "skuName", "skuPartNumber", "unit", "compartmentName", "compartmentPath", "compartmentId", "platform", "region", "logicalAd", "resourceId", "tenantId", "tenantName"]`
        """
        return pulumi.get(self, "group_bies")
    @group_bies.setter
    def group_bies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "group_bies", value)
    @property
    @pulumi.getter(name="groupByTags")
    def group_by_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]]]:
        """
        GroupBy a specific tagKey. Provide the tagNamespace and tagKey in the tag object. Only supports one tag in the list. For example: `[{"namespace":"oracle", "key":"createdBy"}]`
        """
        return pulumi.get(self, "group_by_tags")
    @group_by_tags.setter
    def group_by_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]]]):
        pulumi.set(self, "group_by_tags", value)
    @property
    @pulumi.getter(name="isAggregateByTime")
    def is_aggregate_by_time(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether aggregated by time. If isAggregateByTime is true, all usage/cost over the query time period will be added up.
        """
        return pulumi.get(self, "is_aggregate_by_time")
    @is_aggregate_by_time.setter
    def is_aggregate_by_time(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_aggregate_by_time", value)
    @property
    @pulumi.getter(name="queryType")
    def query_type(self) -> Optional[pulumi.Input[str]]:
        """
        The query usage type. COST by default if it is missing. Usage - Query the usage data. Cost - Query the cost/billing data. Credit - Query the credit adjustments data. ExpiredCredit - Query the expired credits data. AllCredit - Query the credit adjustments and expired credit.
        """
        return pulumi.get(self, "query_type")
    @query_type.setter
    def query_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query_type", value)
@pulumi.input_type
class _UsageState:
    # State bag used when looking up / refreshing an existing Usage resource.
    # Unlike UsageArgs, every field is optional, and the output-only `items`
    # field is included. @pulumi.input_type introspects the getters below, so
    # names and pulumi.get/set keys must stay in sync.
    def __init__(__self__, *,
                 compartment_depth: Optional[pulumi.Input[float]] = None,
                 filter: Optional[pulumi.Input[str]] = None,
                 forecast: Optional[pulumi.Input['UsageForecastArgs']] = None,
                 granularity: Optional[pulumi.Input[str]] = None,
                 group_bies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 group_by_tags: Optional[pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]]] = None,
                 is_aggregate_by_time: Optional[pulumi.Input[bool]] = None,
                 items: Optional[pulumi.Input[Sequence[pulumi.Input['UsageItemArgs']]]] = None,
                 query_type: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 time_usage_ended: Optional[pulumi.Input[str]] = None,
                 time_usage_started: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Usage resources.
        :param pulumi.Input[float] compartment_depth: The compartment depth level.
        :param pulumi.Input[str] filter: The usage filter (undocumented in the generated schema — confirm against the OCI Usage API).
        :param pulumi.Input['UsageForecastArgs'] forecast: Forecast configuration of usage/cost.
        :param pulumi.Input[str] granularity: The usage granularity. HOURLY - Hourly data aggregation. DAILY - Daily data aggregation. MONTHLY - Monthly data aggregation. TOTAL - Not yet supported.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] group_bies: Aggregate the result by. example: `["tagNamespace", "tagKey", "tagValue", "service", "skuName", "skuPartNumber", "unit", "compartmentName", "compartmentPath", "compartmentId", "platform", "region", "logicalAd", "resourceId", "tenantId", "tenantName"]`
        :param pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]] group_by_tags: GroupBy a specific tagKey. Provide the tagNamespace and tagKey in the tag object. Only supports one tag in the list. For example: `[{"namespace":"oracle", "key":"createdBy"}]`
        :param pulumi.Input[bool] is_aggregate_by_time: Whether aggregated by time. If isAggregateByTime is true, all usage/cost over the query time period will be added up.
        :param pulumi.Input[Sequence[pulumi.Input['UsageItemArgs']]] items: A list of usage items.
        :param pulumi.Input[str] query_type: The query usage type. COST by default if it is missing. Usage - Query the usage data. Cost - Query the cost/billing data. Credit - Query the credit adjustments data. ExpiredCredit - Query the expired credits data. AllCredit - Query the credit adjustments and expired credit.
        :param pulumi.Input[str] tenant_id: Tenant ID.
        :param pulumi.Input[str] time_usage_ended: The usage end time.
        :param pulumi.Input[str] time_usage_started: The usage start time.
        """
        # All fields are optional; only record the ones actually supplied.
        if compartment_depth is not None:
            pulumi.set(__self__, "compartment_depth", compartment_depth)
        if filter is not None:
            pulumi.set(__self__, "filter", filter)
        if forecast is not None:
            pulumi.set(__self__, "forecast", forecast)
        if granularity is not None:
            pulumi.set(__self__, "granularity", granularity)
        if group_bies is not None:
            pulumi.set(__self__, "group_bies", group_bies)
        if group_by_tags is not None:
            pulumi.set(__self__, "group_by_tags", group_by_tags)
        if is_aggregate_by_time is not None:
            pulumi.set(__self__, "is_aggregate_by_time", is_aggregate_by_time)
        if items is not None:
            pulumi.set(__self__, "items", items)
        if query_type is not None:
            pulumi.set(__self__, "query_type", query_type)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
        if time_usage_ended is not None:
            pulumi.set(__self__, "time_usage_ended", time_usage_ended)
        if time_usage_started is not None:
            pulumi.set(__self__, "time_usage_started", time_usage_started)
    @property
    @pulumi.getter(name="compartmentDepth")
    def compartment_depth(self) -> Optional[pulumi.Input[float]]:
        """
        The compartment depth level.
        """
        return pulumi.get(self, "compartment_depth")
    @compartment_depth.setter
    def compartment_depth(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "compartment_depth", value)
    @property
    @pulumi.getter
    def filter(self) -> Optional[pulumi.Input[str]]:
        """
        The usage filter (undocumented in the generated schema — confirm against the OCI Usage API).
        """
        return pulumi.get(self, "filter")
    @filter.setter
    def filter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filter", value)
    @property
    @pulumi.getter
    def forecast(self) -> Optional[pulumi.Input['UsageForecastArgs']]:
        """
        Forecast configuration of usage/cost.
        """
        return pulumi.get(self, "forecast")
    @forecast.setter
    def forecast(self, value: Optional[pulumi.Input['UsageForecastArgs']]):
        pulumi.set(self, "forecast", value)
    @property
    @pulumi.getter
    def granularity(self) -> Optional[pulumi.Input[str]]:
        """
        The usage granularity. HOURLY - Hourly data aggregation. DAILY - Daily data aggregation. MONTHLY - Monthly data aggregation. TOTAL - Not yet supported.
        """
        return pulumi.get(self, "granularity")
    @granularity.setter
    def granularity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "granularity", value)
    @property
    @pulumi.getter(name="groupBies")
    def group_bies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Aggregate the result by. example: `["tagNamespace", "tagKey", "tagValue", "service", "skuName", "skuPartNumber", "unit", "compartmentName", "compartmentPath", "compartmentId", "platform", "region", "logicalAd", "resourceId", "tenantId", "tenantName"]`
        """
        return pulumi.get(self, "group_bies")
    @group_bies.setter
    def group_bies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "group_bies", value)
    @property
    @pulumi.getter(name="groupByTags")
    def group_by_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]]]:
        """
        GroupBy a specific tagKey. Provide the tagNamespace and tagKey in the tag object. Only supports one tag in the list. For example: `[{"namespace":"oracle", "key":"createdBy"}]`
        """
        return pulumi.get(self, "group_by_tags")
    @group_by_tags.setter
    def group_by_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UsageGroupByTagArgs']]]]):
        pulumi.set(self, "group_by_tags", value)
    @property
    @pulumi.getter(name="isAggregateByTime")
    def is_aggregate_by_time(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether aggregated by time. If isAggregateByTime is true, all usage/cost over the query time period will be added up.
        """
        return pulumi.get(self, "is_aggregate_by_time")
    @is_aggregate_by_time.setter
    def is_aggregate_by_time(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_aggregate_by_time", value)
    @property
    @pulumi.getter
    def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UsageItemArgs']]]]:
        """
        A list of usage items.
        """
        return pulumi.get(self, "items")
    @items.setter
    def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UsageItemArgs']]]]):
        pulumi.set(self, "items", value)
    @property
    @pulumi.getter(name="queryType")
    def query_type(self) -> Optional[pulumi.Input[str]]:
        """
        The query usage type. COST by default if it is missing. Usage - Query the usage data. Cost - Query the cost/billing data. Credit - Query the credit adjustments data. ExpiredCredit - Query the expired credits data. AllCredit - Query the credit adjustments and expired credit.
        """
        return pulumi.get(self, "query_type")
    @query_type.setter
    def query_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query_type", value)
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        Tenant ID.
        """
        return pulumi.get(self, "tenant_id")
    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)
    @property
    @pulumi.getter(name="timeUsageEnded")
    def time_usage_ended(self) -> Optional[pulumi.Input[str]]:
        """
        The usage end time.
        """
        return pulumi.get(self, "time_usage_ended")
    @time_usage_ended.setter
    def time_usage_ended(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_usage_ended", value)
    @property
    @pulumi.getter(name="timeUsageStarted")
    def time_usage_started(self) -> Optional[pulumi.Input[str]]:
        """
        The usage start time.
        """
        return pulumi.get(self, "time_usage_started")
    @time_usage_started.setter
    def time_usage_started(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_usage_started", value)
class Usage(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 compartment_depth: Optional[pulumi.Input[float]] = None,
                 filter: Optional[pulumi.Input[str]] = None,
                 forecast: Optional[pulumi.Input[pulumi.InputType['UsageForecastArgs']]] = None,
                 granularity: Optional[pulumi.Input[str]] = None,
                 group_bies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 group_by_tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UsageGroupByTagArgs']]]]] = None,
                 is_aggregate_by_time: Optional[pulumi.Input[bool]] = None,
                 query_type: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 time_usage_ended: Optional[pulumi.Input[str]] = None,
                 time_usage_started: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        This resource provides the Usage resource in Oracle Cloud Infrastructure Metering Computation service.
        Returns usage for the given account.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_oci as oci
        test_usage = oci.meteringcomputation.Usage("testUsage",
            granularity=var["usage_granularity"],
            tenant_id=oci_metering_computation_tenant["test_tenant"]["id"],
            time_usage_ended=var["usage_time_usage_ended"],
            time_usage_started=var["usage_time_usage_started"],
            compartment_depth=var["usage_compartment_depth"],
            filter=var["usage_filter"],
            forecast=oci.meteringcomputation.UsageForecastArgs(
                time_forecast_ended=var["usage_forecast_time_forecast_ended"],
                forecast_type=var["usage_forecast_forecast_type"],
                time_forecast_started=var["usage_forecast_time_forecast_started"],
            ),
            group_bies=var["usage_group_by"],
            group_by_tags=[oci.meteringcomputation.UsageGroupByTagArgs(
                key=var["usage_group_by_tag_key"],
                namespace=var["usage_group_by_tag_namespace"],
                value=var["usage_group_by_tag_value"],
            )],
            is_aggregate_by_time=var["usage_is_aggregate_by_time"],
            query_type=var["usage_query_type"])
        ```
        ## Import
        Import is not supported for this resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[float] compartment_depth: The compartment depth level.
        :param pulumi.Input[str] filter: The usage filter (undocumented in the generated schema — confirm against the OCI Usage API).
        :param pulumi.Input[pulumi.InputType['UsageForecastArgs']] forecast: Forecast configuration of usage/cost.
        :param pulumi.Input[str] granularity: The usage granularity. HOURLY - Hourly data aggregation. DAILY - Daily data aggregation. MONTHLY - Monthly data aggregation. TOTAL - Not yet supported.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] group_bies: Aggregate the result by. example: `["tagNamespace", "tagKey", "tagValue", "service", "skuName", "skuPartNumber", "unit", "compartmentName", "compartmentPath", "compartmentId", "platform", "region", "logicalAd", "resourceId", "tenantId", "tenantName"]`
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UsageGroupByTagArgs']]]] group_by_tags: GroupBy a specific tagKey. Provide the tagNamespace and tagKey in the tag object. Only supports one tag in the list. For example: `[{"namespace":"oracle", "key":"createdBy"}]`
        :param pulumi.Input[bool] is_aggregate_by_time: Whether aggregated by time. If isAggregateByTime is true, all usage/cost over the query time period will be added up.
        :param pulumi.Input[str] query_type: The query usage type. COST by default if it is missing. Usage - Query the usage data. Cost - Query the cost/billing data. Credit - Query the credit adjustments data. ExpiredCredit - Query the expired credits data. AllCredit - Query the credit adjustments and expired credit.
        :param pulumi.Input[str] tenant_id: Tenant ID.
        :param pulumi.Input[str] time_usage_ended: The usage end time.
        :param pulumi.Input[str] time_usage_started: The usage start time.
        """
        # Typing-only overload: keyword-argument construction form.
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UsageArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        This resource provides the Usage resource in Oracle Cloud Infrastructure Metering Computation service.
        Returns usage for the given account.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_oci as oci
        test_usage = oci.meteringcomputation.Usage("testUsage",
            granularity=var["usage_granularity"],
            tenant_id=oci_metering_computation_tenant["test_tenant"]["id"],
            time_usage_ended=var["usage_time_usage_ended"],
            time_usage_started=var["usage_time_usage_started"],
            compartment_depth=var["usage_compartment_depth"],
            filter=var["usage_filter"],
            forecast=oci.meteringcomputation.UsageForecastArgs(
                time_forecast_ended=var["usage_forecast_time_forecast_ended"],
                forecast_type=var["usage_forecast_forecast_type"],
                time_forecast_started=var["usage_forecast_time_forecast_started"],
            ),
            group_bies=var["usage_group_by"],
            group_by_tags=[oci.meteringcomputation.UsageGroupByTagArgs(
                key=var["usage_group_by_tag_key"],
                namespace=var["usage_group_by_tag_namespace"],
                value=var["usage_group_by_tag_value"],
            )],
            is_aggregate_by_time=var["usage_is_aggregate_by_time"],
            query_type=var["usage_query_type"])
        ```
        ## Import
        Import is not supported for this resource.
        :param str resource_name: The name of the resource.
        :param UsageArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Typing-only overload: args-object construction form. The actual
        # dispatch happens in the untyped __init__ implementation.
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime implementation behind the two @overload signatures above:
        # _utilities.get_resource_args_opts decides whether the caller used the
        # (resource_name, args: UsageArgs, opts) form or the keyword form.
        resource_args, opts = _utilities.get_resource_args_opts(UsageArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand the UsageArgs fields into keyword args.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword form: forward the original positional/keyword args as-is.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       compartment_depth: Optional[pulumi.Input[float]] = None,
                       filter: Optional[pulumi.Input[str]] = None,
                       forecast: Optional[pulumi.Input[pulumi.InputType['UsageForecastArgs']]] = None,
                       granularity: Optional[pulumi.Input[str]] = None,
                       group_bies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       group_by_tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UsageGroupByTagArgs']]]]] = None,
                       is_aggregate_by_time: Optional[pulumi.Input[bool]] = None,
                       query_type: Optional[pulumi.Input[str]] = None,
                       tenant_id: Optional[pulumi.Input[str]] = None,
                       time_usage_ended: Optional[pulumi.Input[str]] = None,
                       time_usage_started: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        """
        Shared constructor body for both __init__ overloads: validates options,
        builds the UsageArgs property bag, and registers the resource.
        :raises TypeError: if opts is not a ResourceOptions, if __props__ is
            passed without opts.id, or if a required property is missing.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to getting an existing one),
            # so __props__ must not have been supplied by the caller.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = UsageArgs.__new__(UsageArgs)
            __props__.__dict__["compartment_depth"] = compartment_depth
            __props__.__dict__["filter"] = filter
            __props__.__dict__["forecast"] = forecast
            # Required properties are only enforced when not rehydrating from
            # an existing URN (opts.urn), where the engine supplies the state.
            if granularity is None and not opts.urn:
                raise TypeError("Missing required property 'granularity'")
            __props__.__dict__["granularity"] = granularity
            __props__.__dict__["group_bies"] = group_bies
            __props__.__dict__["group_by_tags"] = group_by_tags
            __props__.__dict__["is_aggregate_by_time"] = is_aggregate_by_time
            __props__.__dict__["query_type"] = query_type
            if tenant_id is None and not opts.urn:
                raise TypeError("Missing required property 'tenant_id'")
            __props__.__dict__["tenant_id"] = tenant_id
            if time_usage_ended is None and not opts.urn:
                raise TypeError("Missing required property 'time_usage_ended'")
            __props__.__dict__["time_usage_ended"] = time_usage_ended
            if time_usage_started is None and not opts.urn:
                raise TypeError("Missing required property 'time_usage_started'")
            __props__.__dict__["time_usage_started"] = time_usage_started
            # `items` is output-only: initialized to None and populated by the
            # provider after the query runs.
            __props__.__dict__["items"] = None
        super(Usage, __self__).__init__(
            'oci:meteringcomputation/usage:Usage',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            compartment_depth: Optional[pulumi.Input[float]] = None,
            filter: Optional[pulumi.Input[str]] = None,
            forecast: Optional[pulumi.Input[pulumi.InputType['UsageForecastArgs']]] = None,
            granularity: Optional[pulumi.Input[str]] = None,
            group_bies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            group_by_tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UsageGroupByTagArgs']]]]] = None,
            is_aggregate_by_time: Optional[pulumi.Input[bool]] = None,
            items: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UsageItemArgs']]]]] = None,
            query_type: Optional[pulumi.Input[str]] = None,
            tenant_id: Optional[pulumi.Input[str]] = None,
            time_usage_ended: Optional[pulumi.Input[str]] = None,
            time_usage_started: Optional[pulumi.Input[str]] = None) -> 'Usage':
        """
        Get an existing Usage resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[float] compartment_depth: The compartment depth level.
        :param pulumi.Input[str] filter: The usage filter (undocumented in the generated schema — confirm against the OCI Usage API).
        :param pulumi.Input[pulumi.InputType['UsageForecastArgs']] forecast: Forecast configuration of usage/cost.
        :param pulumi.Input[str] granularity: The usage granularity. HOURLY - Hourly data aggregation. DAILY - Daily data aggregation. MONTHLY - Monthly data aggregation. TOTAL - Not yet supported.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] group_bies: Aggregate the result by. example: `["tagNamespace", "tagKey", "tagValue", "service", "skuName", "skuPartNumber", "unit", "compartmentName", "compartmentPath", "compartmentId", "platform", "region", "logicalAd", "resourceId", "tenantId", "tenantName"]`
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UsageGroupByTagArgs']]]] group_by_tags: GroupBy a specific tagKey. Provide the tagNamespace and tagKey in the tag object. Only supports one tag in the list. For example: `[{"namespace":"oracle", "key":"createdBy"}]`
        :param pulumi.Input[bool] is_aggregate_by_time: Whether aggregated by time. If isAggregateByTime is true, all usage/cost over the query time period will be added up.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UsageItemArgs']]]] items: A list of usage items.
        :param pulumi.Input[str] query_type: The query usage type. COST by default if it is missing. Usage - Query the usage data. Cost - Query the cost/billing data. Credit - Query the credit adjustments data. ExpiredCredit - Query the expired credits data. AllCredit - Query the credit adjustments and expired credit.
        :param pulumi.Input[str] tenant_id: Tenant ID.
        :param pulumi.Input[str] time_usage_ended: The usage end time.
        :param pulumi.Input[str] time_usage_started: The usage start time.
        """
        # Merging an explicit id into opts signals the engine to read existing
        # state rather than create a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _UsageState.__new__(_UsageState)
        __props__.__dict__["compartment_depth"] = compartment_depth
        __props__.__dict__["filter"] = filter
        __props__.__dict__["forecast"] = forecast
        __props__.__dict__["granularity"] = granularity
        __props__.__dict__["group_bies"] = group_bies
        __props__.__dict__["group_by_tags"] = group_by_tags
        __props__.__dict__["is_aggregate_by_time"] = is_aggregate_by_time
        __props__.__dict__["items"] = items
        __props__.__dict__["query_type"] = query_type
        __props__.__dict__["tenant_id"] = tenant_id
        __props__.__dict__["time_usage_ended"] = time_usage_ended
        __props__.__dict__["time_usage_started"] = time_usage_started
        return Usage(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="compartmentDepth")
def compartment_depth(self) -> pulumi.Output[float]:
    """
    The compartment depth level.
    """
    depth = pulumi.get(self, "compartment_depth")
    return depth
@property
@pulumi.getter
def filter(self) -> pulumi.Output[Optional[str]]:
    # No upstream description is provided for this property; it exposes
    # the resource's "filter" output value as-is.
    value = pulumi.get(self, "filter")
    return value
@property
@pulumi.getter
def forecast(self) -> pulumi.Output['outputs.UsageForecast']:
    """
    Forecast configuration of usage/cost.
    """
    config = pulumi.get(self, "forecast")
    return config
@property
@pulumi.getter
def granularity(self) -> pulumi.Output[str]:
    """
    The usage granularity. HOURLY - Hourly data aggregation. DAILY - Daily data aggregation. MONTHLY - Monthly data aggregation. TOTAL - Not yet supported.
    """
    level = pulumi.get(self, "granularity")
    return level
@property
@pulumi.getter(name="groupBies")
def group_bies(self) -> pulumi.Output[Sequence[str]]:
    """
    Aggregate the result by. example: `["tagNamespace", "tagKey", "tagValue", "service", "skuName", "skuPartNumber", "unit", "compartmentName", "compartmentPath", "compartmentId", "platform", "region", "logicalAd", "resourceId", "tenantId", "tenantName"]`
    """
    keys = pulumi.get(self, "group_bies")
    return keys
@property
@pulumi.getter(name="groupByTags")
def group_by_tags(self) -> pulumi.Output[Sequence['outputs.UsageGroupByTag']]:
    """
    GroupBy a specific tagKey. Provide the tagNamespace and tagKey in the tag object. Only supports one tag in the list. For example: `[{"namespace":"oracle", "key":"createdBy"]`
    """
    tags = pulumi.get(self, "group_by_tags")
    return tags
@property
@pulumi.getter(name="isAggregateByTime")
def is_aggregate_by_time(self) -> pulumi.Output[bool]:
    """
    Whether aggregated by time. If isAggregateByTime is true, all usage/cost over the query time period will be added up.
    """
    flag = pulumi.get(self, "is_aggregate_by_time")
    return flag
@property
@pulumi.getter
def items(self) -> pulumi.Output[Sequence['outputs.UsageItem']]:
    """
    A list of usage items.
    """
    usage_items = pulumi.get(self, "items")
    return usage_items
@property
@pulumi.getter(name="queryType")
def query_type(self) -> pulumi.Output[str]:
    """
    The query usage type. COST by default if it is missing. Usage - Query the usage data. Cost - Query the cost/billing data. Credit - Query the credit adjustments data. ExpiredCredit - Query the expired credits data. AllCredit - Query the credit adjustments and expired credit.
    """
    kind = pulumi.get(self, "query_type")
    return kind
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
    """
    Tenant ID.
    """
    tenant = pulumi.get(self, "tenant_id")
    return tenant
@property
@pulumi.getter(name="timeUsageEnded")
def time_usage_ended(self) -> pulumi.Output[str]:
    """
    The usage end time.
    """
    end_time = pulumi.get(self, "time_usage_ended")
    return end_time
@property
@pulumi.getter(name="timeUsageStarted")
def time_usage_started(self) -> pulumi.Output[str]:
    """
    The usage start time.
    """
    start_time = pulumi.get(self, "time_usage_started")
    return start_time
| 49.838755
| 320
| 0.661341
| 4,108
| 35,236
| 5.436952
| 0.055501
| 0.089143
| 0.077412
| 0.04137
| 0.922722
| 0.907499
| 0.892366
| 0.878755
| 0.864876
| 0.845847
| 0
| 0.000037
| 0.226303
| 35,236
| 706
| 321
| 49.909348
| 0.819236
| 0.379243
| 0
| 0.754386
| 1
| 0
| 0.118341
| 0.003918
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162907
| false
| 0.002506
| 0.017544
| 0.007519
| 0.278195
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2acceb2c8230138934abd76ad4087e58be2f9042
| 48,011
|
py
|
Python
|
netapp_activeiq_api/apis/cluster_analytics_api.py
|
woutercoppens/netapp-activeiq-api
|
a8f86355ecdd769953b69e38824b4db07c11c89e
|
[
"Apache-2.0"
] | 3
|
2021-09-28T23:22:59.000Z
|
2021-11-23T14:53:54.000Z
|
netapp_activeiq_api/apis/cluster_analytics_api.py
|
woutercoppens/netapp-activeiq-api
|
a8f86355ecdd769953b69e38824b4db07c11c89e
|
[
"Apache-2.0"
] | null | null | null |
netapp_activeiq_api/apis/cluster_analytics_api.py
|
woutercoppens/netapp-activeiq-api
|
a8f86355ecdd769953b69e38824b4db07c11c89e
|
[
"Apache-2.0"
] | 1
|
2021-04-01T11:22:23.000Z
|
2021-04-01T11:22:23.000Z
|
from .api_client import ApiClient
class ClusterAnalyticsApi:
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_adapter_interface(self, serial_no, **kwargs): # noqa: E501
"""Provides Adapter Interface data. # noqa: E501
Provides Adapter Interface data. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_adapter_interface" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_adapter_interface`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-adapter-interface/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_aggregate_efficiency(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Local Tier Efficiency. # noqa: E501
Displays the efficiency data using 'AGGR-EFFICIENCY.XML' section. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_aggregate_efficiency" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_aggregate_efficiency`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-aggregate-efficiency/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_aggregate_summary(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Local Tier Summary. # noqa: E501
Displays the data for the Local Tier Summary represented in the categories provided below: Local Tier Name, Local Tier Type, RAID Type, Disk Count, Data Disk Count, Usable Capacity (TiB), Used Capacity (TiB), Available Capacity (TiB), Physical Capacity (TiB), Logical Capacity (TiB), Used Data Percentage, Number of RAID Groups, RAID Group Size, # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_aggregate_summary" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_aggregate_summary`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-aggregate-summary/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_cable_visualization(self, serial_no, **kwargs): # noqa: E501
"""Provides Cable Visualization data. # noqa: E501
Cable visualization shows data for controller, shelves, switches and auto bridges and it also shows connection between them. Shelves are grouped into stack. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cable_visualization" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_cable_visualization`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-cable-visualization/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_capacity_headroom_details(self, serial_no, **kwargs): # noqa: E501
"""Provides Capacity Headroom table data. # noqa: E501
Provides Capacity Headroom table data. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_capacity_headroom_details" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_capacity_headroom_details`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
if "authorization_token" in params:
header_params["authorizationToken"] = params[
"authorization_token"
] # noqa: E501
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-capacity-headroom/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_cluster_configuration(self, uuid, **kwargs): # noqa: E501
"""Provides the cluster IP address, node, and release version data of a specific cluster UUID. # noqa: E501
Displays the cluster IP address, node, and release version data of a specific cluster UUID. # noqa: E501
:param str uuid: Specifies the required cluster ID or UUID. (required)
:param str lang: Value representing a language
"""
all_params = ["uuid", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cluster_configuration" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'uuid' is set
if "uuid" not in params or params["uuid"] is None:
raise ValueError(
"Missing the required parameter `uuid` when calling `get_cluster_configuration`"
) # noqa: E501
path_params = {}
if "uuid" in params:
path_params["uuid"] = params["uuid"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-cluster-configuration/{uuid}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_cluster_summary(self, serial_no, **kwargs): # noqa: E501
"""Provides the data for the Cluster Summary. # noqa: E501
Displays the data for the Cluster Summary represented in the categories provided below: Cluster Name, Cluster Management IP Address, Raw Capacity (TiB), Usable Capacity (TiB), Used Capacity (TiB), Available Capacity (TiB), Physical Capacity (TiB), Logical Capacity (TiB), High-Availability Configured, Node Storage VMs, Data Storage VMs, Local Tiers, Volumes, LUNs, Qtrees, SnapMirror # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cluster_summary" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_cluster_summary`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-cluster-summary/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_disk_details(self, serial_no, **kwargs): # noqa: E501
"""Provides Disk Details. # noqa: E501
It provides each disk details and raid group details. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_disk_details" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_disk_details`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-disk-details/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_free_slot_details(self, serial_no, **kwargs): # noqa: E501
"""Provides free slots data. # noqa: E501
Provides free slots data. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_free_slot_details" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'authorization_token' is set
if "authorization_token" not in params or params["authorization_token"] is None:
raise ValueError(
"Missing the required parameter `authorization_token` when calling `get_free_slot_details`"
) # noqa: E501
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_free_slot_details`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-free-slot-details/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_max_supported_capacity_details(self, serial_no, **kwargs): # noqa: E501
"""Provides Max Supported Capacity data. # noqa: E501
Provides Max Supported Capacity data. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_max_supported_capacity_details" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_max_supported_capacity_details`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-max-supported-capacity/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_module_overview(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Module Overview. # noqa: E501
Displays the data for the Module Overview represented in the categories provided below: Module Type, Number of Shelf Modules # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_module_overview" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_module_overview`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-module-overview/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_network_interfaces(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Network Interface. # noqa: E501
Displays the data for the Network Interface represented in the categories provided below: Storage Virtual Machine, Logical Interface, Role, Status(Admin/Operational), Network Address, Current Port, Is Home, Failover Group Name # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_network_interfaces" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_network_interfaces`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-network-interfaces/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_network_ports(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Network Ports. # noqa: E501
Displays the data for the Network Ports represented in the categories provided below: Port, Role, Link, Maximum Transmission Unit (MTU), MAC Address, Operational Speed, IPspace Name, Broadcast Domain, Interface Group Owner # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_network_ports" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_network_ports`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-network-ports/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_node_slot_map(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Hardware Slot Map. # noqa: E501
Displays the data for the Hardware Slot Map represented in the categories provided below: Slot Number, Description, Part Number, Serial Number # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_node_slot_map" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_node_slot_map`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-node-slot-map/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_node_summary(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Node Summary. # noqa: E501
Displays the data for the Node Summary represented in the categories provided below: Device Type, System Operating Mode, Cluster Name, Hostname, Serial Number, System ID, Release Version, Model, Configuration, IP Address, High-Availability Partner Hostname, High-Availability Partner System ID, Raw Capacity (TiB), Usable Capacity (TiB), Used Capacity (TiB), Available Capacity (TiB), Physical Capacity (TiB), Logical Capacity (TiB), Installed Licenses # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_node_summary" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_node_summary`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-node-summary/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_node_summary_count(self, serial_no, **kwargs): # noqa: E501
"""Provides data of the systems running 7-Mode and presented in the Node Summary Count table. # noqa: E501
Displays data in the Node Summary Count represented by the categories provided below: Local Tiers, Volumes, LUNs # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_node_summary_count" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_node_summary_count`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-node-summary-count/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_raid_disk_visualization(self, serial_no, **kwargs): # noqa: E501
"""Provides Raid Disk Visualization data. # noqa: E501
Raid Disk Visualization provides disk data for each shelf that is grouped under stack. It provides aggregate data with color coding to diffrentiate the disks on UI. # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_raid_disk_visualization" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_raid_disk_visualization`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-disk-visualization/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_resolver(self, serial_no, **kwargs): # noqa: E501
"""Provides information about a cluster and node for a specific Serial number, Cluster ID, and Job ID. # noqa: E501
Provides information about a cluster and node for a specific Serial number, Cluster ID, and Job ID. # noqa: E501
:param str serial_no: Specifies the serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_resolver" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_resolver`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/resolver/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_shelf_adp_data(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Shelf and Drive Summary for ADP. # noqa: E501
Displays the data for the Shelf and Drive Summary ADP represented in the categories provided below: Shelf Type, Shelf Serial Number, Drive Type, Drive Model, Drive RPM, Disk Marketing Size (GiB), Number of Owned Partitions, Number of Data Partitions, Number of Parity Partitions, Number of Spare Partitions, Number of ADP Drives, Number of Unowned Drives # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_shelf_adp_data" % key
)
params[key] = val
del params["kwargs"]
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_shelf_adp_data`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-shelf-adp-data/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_shelf_and_drive_count(self, serial_no, **kwargs): # noqa: E501
"""Provides data for the Shelf and Drive Count. # noqa: E501
Displays the data for the Shelf and Drive count represented in the categories provided below: Shelf Count, Drive Count # noqa: E501
:param str serial_no: Specifies the required serial number field. (required)
:param str lang: Value representing a language
"""
all_params = ["serial_no", "lang"] # noqa: E501
params = locals()
for key, val in params["kwargs"].items():
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_shelf_and_drive_count" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'serial_no' is set
if "serial_no" not in params or params["serial_no"] is None:
raise ValueError(
"Missing the required parameter `serial_no` when calling `get_shelf_and_drive_count`"
) # noqa: E501
path_params = {}
if "serial_no" in params:
path_params["serial_no"] = params["serial_no"] # noqa: E501
query_params = []
if "lang" in params:
query_params.append(("lang", params["lang"])) # noqa: E501
header_params = {}
body_params = None
return self.api_client.call_api(
"/v1/clusterview/get-shelf-drive-count/{serial_no}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
)
def get_shelf_data(self, serial_no, **kwargs):  # noqa: E501
    """Provides data for the Shelf and Drive Summary.  # noqa: E501

    Displays the data for the Shelf and Drive Summary represented in the
    categories provided below: Shelf Type, Shelf Serial Number, Disk Type,
    Disk Model, Disk RPM, Disk Marketing Size (GiB), Number of Owned Disks,
    Number of Data Drives, Number of Parity Drives, Number of Spare Disks,
    Number of Unowned Disks  # noqa: E501

    :param str serial_no: Specifies the required serial number field. (required)
    :param str lang: Value representing a language
    :return: the result of ``self.api_client.call_api`` for this endpoint
    :raises TypeError: on an unsupported keyword argument
    :raises ValueError: if `serial_no` is None
    """
    all_params = ["serial_no", "lang"]  # noqa: E501
    # Validate keyword arguments directly; the previous locals() snapshot
    # trick relied on CPython-specific behaviour and was harder to follow.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_shelf_data" % key
            )
    params = dict(kwargs, serial_no=serial_no)
    # verify the required parameter 'serial_no' is set
    if params["serial_no"] is None:
        raise ValueError(
            "Missing the required parameter `serial_no` when calling `get_shelf_data`"
        )  # noqa: E501
    path_params = {"serial_no": params["serial_no"]}  # noqa: E501
    query_params = []
    if "lang" in params:
        query_params.append(("lang", params["lang"]))  # noqa: E501
    header_params = {}
    body_params = None
    return self.api_client.call_api(
        "/v1/clusterview/get-shelf-data/{serial_no}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
    )
def get_stack_details(self, serial_no, **kwargs):  # noqa: E501
    """Provides stack and shelf data for the Stack Diagram.  # noqa: E501

    Provides stack and shelf data for the Stack Diagram table. Disks will
    be grouped under shelf and shelves will be grouped under stack.  # noqa: E501

    :param str serial_no: Specifies the required serial number field. (required)
    :param str lang: Value representing a language
    :return: the result of ``self.api_client.call_api`` for this endpoint
    :raises TypeError: on an unsupported keyword argument
    :raises ValueError: if `serial_no` is None
    """
    all_params = ["serial_no", "lang"]  # noqa: E501
    # Validate keyword arguments directly; the previous locals() snapshot
    # trick relied on CPython-specific behaviour and was harder to follow.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_stack_details" % key
            )
    params = dict(kwargs, serial_no=serial_no)
    # verify the required parameter 'serial_no' is set
    if params["serial_no"] is None:
        raise ValueError(
            "Missing the required parameter `serial_no` when calling `get_stack_details`"
        )  # noqa: E501
    path_params = {"serial_no": params["serial_no"]}  # noqa: E501
    query_params = []
    if "lang" in params:
        query_params.append(("lang", params["lang"]))  # noqa: E501
    header_params = {}
    body_params = None
    return self.api_client.call_api(
        "/v1/clusterview/get-stack-details/{serial_no}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
    )
def get_stack_visualization(self, serial_no, **kwargs):  # noqa: E501
    """Provides Stack Visualization data.  # noqa: E501

    Stack Visualization provides stack and shelf data.  # noqa: E501

    :param str serial_no: Specifies the required serial number field. (required)
    :param str lang: Value representing a language
    :return: the result of ``self.api_client.call_api`` for this endpoint
    :raises TypeError: on an unsupported keyword argument
    :raises ValueError: if `serial_no` is None
    """
    all_params = ["serial_no", "lang"]  # noqa: E501
    # Validate keyword arguments directly; the previous locals() snapshot
    # trick relied on CPython-specific behaviour and was harder to follow.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_stack_visualization" % key
            )
    params = dict(kwargs, serial_no=serial_no)
    # verify the required parameter 'serial_no' is set
    if params["serial_no"] is None:
        raise ValueError(
            "Missing the required parameter `serial_no` when calling `get_stack_visualization`"
        )  # noqa: E501
    path_params = {"serial_no": params["serial_no"]}  # noqa: E501
    query_params = []
    if "lang" in params:
        query_params.append(("lang", params["lang"]))  # noqa: E501
    header_params = {}
    body_params = None
    return self.api_client.call_api(
        "/v1/clusterview/get-stack-visualization/{serial_no}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
    )
def get_switch_details(self, serial_no, **kwargs):  # noqa: E501
    """Provides data for the Switch Details.  # noqa: E501

    Displays the data for the Switch Details represented in the categories
    provided below: Switch Name, Serial Number, IP Address, Model Number,
    Switch Network, Software Version, SNMP Version, Community String,
    Is Discovered, Switch Monitoring Status  # noqa: E501

    :param str serial_no: Specifies the required serial number field. (required)
    :param str lang: Value representing a language
    :return: the result of ``self.api_client.call_api`` for this endpoint
    :raises TypeError: on an unsupported keyword argument
    :raises ValueError: if `serial_no` is None
    """
    all_params = ["serial_no", "lang"]  # noqa: E501
    # Validate keyword arguments directly; the previous locals() snapshot
    # trick relied on CPython-specific behaviour and was harder to follow.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_switch_details" % key
            )
    params = dict(kwargs, serial_no=serial_no)
    # verify the required parameter 'serial_no' is set
    if params["serial_no"] is None:
        raise ValueError(
            "Missing the required parameter `serial_no` when calling `get_switch_details`"
        )  # noqa: E501
    path_params = {"serial_no": params["serial_no"]}  # noqa: E501
    query_params = []
    if "lang" in params:
        query_params.append(("lang", params["lang"]))  # noqa: E501
    header_params = {}
    body_params = None
    return self.api_client.call_api(
        "/v1/clusterview/get-switch-details/{serial_no}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
    )
def get_system_options(self, serial_no, **kwargs):  # noqa: E501
    """Provides System Options data.  # noqa: E501

    Provides System Options data.  # noqa: E501

    :param str serial_no: Specifies the required serial number field. (required)
    :param str lang: Value representing a language
    :return: the result of ``self.api_client.call_api`` for this endpoint
    :raises TypeError: on an unsupported keyword argument
    :raises ValueError: if `serial_no` is None
    """
    all_params = ["serial_no", "lang"]  # noqa: E501
    # Validate keyword arguments directly; the previous locals() snapshot
    # trick relied on CPython-specific behaviour and was harder to follow.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_system_options" % key
            )
    params = dict(kwargs, serial_no=serial_no)
    # verify the required parameter 'serial_no' is set
    if params["serial_no"] is None:
        raise ValueError(
            "Missing the required parameter `serial_no` when calling `get_system_options`"
        )  # noqa: E501
    path_params = {"serial_no": params["serial_no"]}  # noqa: E501
    query_params = []
    if "lang" in params:
        query_params.append(("lang", params["lang"]))  # noqa: E501
    header_params = {}
    body_params = None
    return self.api_client.call_api(
        "/v1/clusterview/get-system-options/{serial_no}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
    )
def get_volume_efficiency(self, serial_no, **kwargs):  # noqa: E501
    """Provides data for the Volume Efficiency.  # noqa: E501

    Displays the data for the Volume Efficiency represented in the
    categories provided below: SVM Name, Volume Name, Volume Efficiency
    Ratio, Logical Used (GiB), Physical Used (GiB), Snapshot Used (GiB),
    Total Saved (GiB), Total Saved Percentage, Deduplicated (GiB),
    Deduplicated Percentage, Compressed (GiB), Compressed Percentage,
    Enabled Efficiency Features  # noqa: E501

    :param str serial_no: Specifies the required serial number field. (required)
    :param str lang: Value representing a language
    :return: the result of ``self.api_client.call_api`` for this endpoint
    :raises TypeError: on an unsupported keyword argument
    :raises ValueError: if `serial_no` is None
    """
    all_params = ["serial_no", "lang"]  # noqa: E501
    # Validate keyword arguments directly; the previous locals() snapshot
    # trick relied on CPython-specific behaviour and was harder to follow.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_volume_efficiency" % key
            )
    params = dict(kwargs, serial_no=serial_no)
    # verify the required parameter 'serial_no' is set
    if params["serial_no"] is None:
        raise ValueError(
            "Missing the required parameter `serial_no` when calling `get_volume_efficiency`"
        )  # noqa: E501
    path_params = {"serial_no": params["serial_no"]}  # noqa: E501
    query_params = []
    if "lang" in params:
        query_params.append(("lang", params["lang"]))  # noqa: E501
    header_params = {}
    body_params = None
    return self.api_client.call_api(
        "/v1/clusterview/get-volume-efficiency/{serial_no}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
    )
def get_volume_summary(self, serial_no, **kwargs):  # noqa: E501
    """Provides data for the Volume Summary.  # noqa: E501

    Displays the data for the Volume Summary represented in the categories
    provided below: Volume Name, SVM Name, Local Tier Name, Volume Capacity
    (GiB), Used Capacity (GiB), Available Capacity (GiB), Physical Capacity
    (GiB), Logical Capacity (GiB), Used Data Percentage, Snapshot Reserve
    Used Percentage, Snapshots, Volume Thin Provisioned?, Volume Type  # noqa: E501

    :param str serial_no: Specifies the required serial number field. (required)
    :param str lang: Value representing a language
    :return: the result of ``self.api_client.call_api`` for this endpoint
    :raises TypeError: on an unsupported keyword argument
    :raises ValueError: if `serial_no` is None
    """
    all_params = ["serial_no", "lang"]  # noqa: E501
    # Validate keyword arguments directly; the previous locals() snapshot
    # trick relied on CPython-specific behaviour and was harder to follow.
    for key in kwargs:
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_volume_summary" % key
            )
    params = dict(kwargs, serial_no=serial_no)
    # verify the required parameter 'serial_no' is set
    if params["serial_no"] is None:
        raise ValueError(
            "Missing the required parameter `serial_no` when calling `get_volume_summary`"
        )  # noqa: E501
    path_params = {"serial_no": params["serial_no"]}  # noqa: E501
    query_params = []
    if "lang" in params:
        query_params.append(("lang", params["lang"]))  # noqa: E501
    header_params = {}
    body_params = None
    return self.api_client.call_api(
        "/v1/clusterview/get-volume-summary/{serial_no}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
    )
| 37.923381
| 513
| 0.572973
| 5,480
| 48,011
| 4.860219
| 0.045985
| 0.085605
| 0.054667
| 0.049786
| 0.882181
| 0.866036
| 0.854472
| 0.840242
| 0.800368
| 0.792784
| 0
| 0.018853
| 0.336006
| 48,011
| 1,265
| 514
| 37.95336
| 0.816619
| 0.275104
| 0
| 0.758782
| 0
| 0
| 0.230625
| 0.061554
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.001171
| 0
| 0.066745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ae76ae6847b93d08c514f42ea42dac56b7d58a0
| 4,676
|
py
|
Python
|
exp/regularization.py
|
dhatch207/CS349_final_project
|
42f1bae70b3f813df6bd881f7dd7852c7da3e471
|
[
"MIT"
] | null | null | null |
exp/regularization.py
|
dhatch207/CS349_final_project
|
42f1bae70b3f813df6bd881f7dd7852c7da3e471
|
[
"MIT"
] | null | null | null |
exp/regularization.py
|
dhatch207/CS349_final_project
|
42f1bae70b3f813df6bd881f7dd7852c7da3e471
|
[
"MIT"
] | null | null | null |
import numpy as np
class Regularization:
    """Abstract base class for regularization terms in gradient descent.

    Concrete subclasses override ``forward`` (the penalty value) and
    ``backward`` (its gradient); this base only stores the strength
    hyperparameter. The base implementations are intentionally no-ops.

    Arguments:
        reg_param - (float) Controls how much regularization is applied.
            Must be non-negative.
    """

    def __init__(self, reg_param=0.05):
        self.reg_param = reg_param

    def forward(self, w):
        """Evaluate the regularization penalty at w.

        Arguments:
            w - (np.array) 1D parameter vector of length d+1; the final
                entry is the bias term.

        Returns:
            regularization_term - (float) penalty value at w. The base
            class returns None (no-op).
        """
        pass

    def backward(self, w):
        """Evaluate the gradient of the penalty at w.

        Arguments:
            w - (np.array) 1D parameter vector of length d+1; the final
                entry is the bias term.

        Returns:
            gradient_term - (np.array) gradient of length d+1. The base
            class returns None (no-op).
        """
        pass
class L1Regularization(Regularization):
    """L1 (lasso) regularization for gradient descent."""

    def forward(self, w):
        """Weighted L1 norm of the non-bias parameters.

        The bias (last entry of w) is deliberately excluded from the
        penalty.

        Arguments:
            w - (np.array) 1D parameter vector of length d+1; the final
                entry is the bias term.

        Returns:
            regularization_term - (float) reg_param * ||w[:-1]||_1.
        """
        return self.reg_param * np.abs(w[:-1]).sum()

    def backward(self, w):
        """Gradient of the L1 penalty: reg_param * sign(w), zero for bias.

        Arguments:
            w - (np.array) 1D parameter vector of length d+1; the final
                entry is the bias term.

        Returns:
            gradient_term - (np.array) gradient of length d+1 with the
            bias component forced to zero.
        """
        grad = self.reg_param * np.sign(w)
        grad[-1] = 0
        return grad
class L2Regularization(Regularization):
    """L2 (ridge) regularization for gradient descent."""

    def forward(self, w):
        """Half the squared L2 norm of the non-bias parameters, weighted.

        The bias (last entry of w) is deliberately excluded from the
        penalty.

        Arguments:
            w - (np.array) 1D parameter vector of length d+1; the final
                entry is the bias term.

        Returns:
            regularization_term - (float) reg_param * 0.5 * ||w[:-1]||_2^2.
        """
        half_sq_norm = np.square(w[:-1]).sum() * .5
        return self.reg_param * half_sq_norm

    def backward(self, w):
        """Gradient of the L2 penalty: reg_param * w, zero for bias.

        Arguments:
            w - (np.array) 1D parameter vector of length d+1; the final
                entry is the bias term.

        Returns:
            gradient_term - (np.array) gradient of length d+1 with the
            bias component forced to zero.
        """
        grad = self.reg_param * w
        grad[-1] = 0
        return grad
| 33.884058
| 78
| 0.601369
| 596
| 4,676
| 4.66443
| 0.144295
| 0.123022
| 0.090647
| 0.032374
| 0.861151
| 0.839568
| 0.839568
| 0.839568
| 0.81295
| 0.81295
| 0
| 0.013862
| 0.336612
| 4,676
| 137
| 79
| 34.131387
| 0.882334
| 0.661891
| 0
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.269231
| false
| 0.076923
| 0.038462
| 0
| 0.576923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 9
|
2d827872a9697b2e97d8a70d0cf9a02e5da2216a
| 89
|
py
|
Python
|
random/python/random_random_int.py
|
lmbaeza/Crypto
|
2cbb085e625713b387d99720fbdeadb0b74f31a1
|
[
"MIT"
] | 1
|
2020-08-31T12:17:06.000Z
|
2020-08-31T12:17:06.000Z
|
random/python/random_random_int.py
|
lmbaeza/Cripto
|
2cbb085e625713b387d99720fbdeadb0b74f31a1
|
[
"MIT"
] | null | null | null |
random/python/random_random_int.py
|
lmbaeza/Cripto
|
2cbb085e625713b387d99720fbdeadb0b74f31a1
|
[
"MIT"
] | null | null | null |
def random_int(from_value, to_value):
    """Return a uniformly random integer N with from_value <= N <= to_value.

    The ``+ 1`` makes the upper bound inclusive (same contract as
    ``random.randint``).
    """
    # Local import: no module-level `import random` exists in this file,
    # so calling the original raised NameError.
    import random
    return random.randrange(from_value, to_value + 1)
| 44.5
| 51
| 0.797753
| 15
| 89
| 4.4
| 0.6
| 0.272727
| 0.333333
| 0.484848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 0.101124
| 89
| 2
| 51
| 44.5
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
2d838c285c062d17dc63f833b68e42eb82738429
| 4,686
|
py
|
Python
|
tests/test_temp_data.py
|
BookOps-CAT/babel
|
47c8102bfbad8466185cd0e70501a931dd79ef29
|
[
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null |
tests/test_temp_data.py
|
BookOps-CAT/babel
|
47c8102bfbad8466185cd0e70501a931dd79ef29
|
[
"CC0-1.0",
"CC-BY-4.0"
] | 125
|
2017-10-12T12:14:23.000Z
|
2022-03-11T23:50:19.000Z
|
tests/test_temp_data.py
|
BookOps-CAT/babel
|
47c8102bfbad8466185cd0e70501a931dd79ef29
|
[
"CC0-1.0",
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from .context import datastore, datastore_worker
class TestDataPopulation(unittest.TestCase):
    """creates test data in datastore"""

    def setUp(self):
        # (name, system_id, code, includes_audn) for every shelf code row,
        # in the exact order the rows are inserted.
        shelf_specs = [
            ("world lang", 1, "wl", True),
            ("fiction", 1, "fc", True),
            ("non-fic", 1, "fn", True),
            ("NONE", 1, None, False),
            ("world lang", 2, "0l", True),
            ("fiction", 2, "0f", True),
            ("non-fic", 2, "0n", True),
            ("NONE", 2, None, False),
        ]
        with datastore.session_scope() as session:
            datastore_worker.insert_or_ignore(
                session, datastore.User, name="Tomek", bpl_code="t", nyp_code="k"
            )
            datastore_worker.insert_or_ignore(
                session,
                datastore.Vendor,
                name="China Books",
                bpl_code="chbks",
                nyp_code="cbks",
            )
            for name, system_id, code, includes_audn in shelf_specs:
                datastore_worker.insert_or_ignore(
                    session,
                    datastore.ShelfCode,
                    name=name,
                    system_id=system_id,
                    code=code,
                    includes_audn=includes_audn,
                )
            session.commit()
            # Distribution set, then two grids hanging off it; each commit
            # is needed before the generated primary key (.did) is read.
            distset = datastore_worker.insert_or_ignore(
                session, datastore.DistSet, name="test distr.", system_id=1, user_id=2
            )
            session.commit()
            distgrid = datastore_worker.insert_or_ignore(
                session, datastore.DistGrid, name="grid A", distset_id=distset.did
            )
            session.commit()
            for branch_id, shelfcode_id, qty in [(11, 1, 2), (12, 1, 1), (13, 2, 3)]:
                datastore_worker.insert_or_ignore(
                    session,
                    datastore.GridLocation,
                    distgrid_id=distgrid.did,
                    branch_id=branch_id,
                    shelfcode_id=shelfcode_id,
                    qty=qty,
                )
            distgrid = datastore_worker.insert_or_ignore(
                session, datastore.DistGrid, name="grid B", distset_id=distset.did
            )
            session.commit()
            for branch_id, shelfcode_id, qty in [(12, 1, 2), (13, 1, 1)]:
                datastore_worker.insert_or_ignore(
                    session,
                    datastore.GridLocation,
                    distgrid_id=distgrid.did,
                    branch_id=branch_id,
                    shelfcode_id=shelfcode_id,
                    qty=qty,
                )

    def test_start(self):
        pass
if __name__ == "__main__":
    # Allow running this test module directly via the unittest CLI runner.
    unittest.main()
| 30.828947
| 147
| 0.46991
| 397
| 4,686
| 5.27204
| 0.206549
| 0.143335
| 0.190635
| 0.208791
| 0.78882
| 0.770186
| 0.770186
| 0.705686
| 0.705686
| 0.705686
| 0
| 0.013651
| 0.452838
| 4,686
| 151
| 148
| 31.033113
| 0.802652
| 0.039906
| 0
| 0.669118
| 0
| 0
| 0.02805
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0.007353
| 0.014706
| 0
| 0.036765
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2dc5302ea6257b878dca66d50d271bebf29bb85b
| 90
|
py
|
Python
|
juju_suspend/providers/__init__.py
|
niedbalski/juju-suspend
|
a3fa076e1cac48e0fd6a73dc3aef473c78150251
|
[
"MIT"
] | 3
|
2015-02-13T22:13:38.000Z
|
2015-02-17T02:42:28.000Z
|
juju_suspend/providers/__init__.py
|
niedbalski/juju-suspend
|
a3fa076e1cac48e0fd6a73dc3aef473c78150251
|
[
"MIT"
] | null | null | null |
juju_suspend/providers/__init__.py
|
niedbalski/juju-suspend
|
a3fa076e1cac48e0fd6a73dc3aef473c78150251
|
[
"MIT"
] | null | null | null |
from juju_suspend.providers.local import *
from juju_suspend.providers.openstack import *
| 30
| 46
| 0.844444
| 12
| 90
| 6.166667
| 0.583333
| 0.216216
| 0.405405
| 0.648649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 90
| 2
| 47
| 45
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2de3b7a4d0922525c24151cc4634159320ea5c9d
| 2,299
|
py
|
Python
|
oiasg/define/scenarios.py
|
will7101/OIASG
|
44badff57689da99a2c9896d176b32e7b51d42b5
|
[
"BSD-3-Clause"
] | 1
|
2018-03-17T10:07:11.000Z
|
2018-03-17T10:07:11.000Z
|
oiasg/define/scenarios.py
|
will7101/OIASG
|
44badff57689da99a2c9896d176b32e7b51d42b5
|
[
"BSD-3-Clause"
] | 1
|
2018-03-17T11:35:54.000Z
|
2018-03-17T11:35:54.000Z
|
oiasg/define/scenarios.py
|
will7101/OIASG
|
44badff57689da99a2c9896d176b32e7b51d42b5
|
[
"BSD-3-Clause"
] | null | null | null |
# Scenario catalogue: scenario id -> display metadata ('name', 'icon',
# 'image', 'text') plus the game definition file to load ('game_define').
# The commented-out entries at the bottom are disabled variants.
{
    'SCENARIOS': {
        'random': {
            'name': 'RANDOM',
            'icon': 'default.png',
            'image': 'default.png',
            'text': 'N随机角色',
            # predefined game definition file
            'game_define': 'general',
        },
        'wxhakioi2019': {
            'name': 'WXH AK IOI 2019',
            'icon': 'orz.gif',
            'image': 'default.png',
            'text': 'N大佬wxh\n\nAKIOI2019\n\nDALAODALAODALAODALAODALAODALAODALAODALAO DALAODALAODALAODALAODALAODALAODALAODALAODALAODALAO',
            'game_define': 'general',
        },
        'lcaggctsc2018': {
            'name': 'CommonAnts GG CTSC 2018',
            'icon': 'commonants_icon.png',
            'image': 'commonants_image.png',
            'text': 'N蒟蒻CommonAnts\nGG CTSC 2018',
            'game_define': 'general',
        },
        'random_1': {
            'name': 'RANDOM',
            'icon': 'default.png',
            'image': 'default.png',
            'text': 'N随机角色',
            'game_define': 'general',
        },
        'wxhakioi2019_1': {
            'name': 'WXH AK IOI 2019',
            'icon': 'orz.gif',
            'image': 'default.png',
            'text': 'N大佬wxh\nAKIOI2019',
            'game_define': 'general',
        },
        'lcaggctsc2018_1': {
            'name': 'CommonAnts GG CTSC 2018',
            'icon': 'commonants_icon.png',
            'image': 'commonants_image.png',
            'text': 'N蒟蒻CommonAnts\nGG CTSC 2018',
            'game_define': 'general',
        },
        'random_2': {
            'name': 'RANDOM',
            'icon': 'default.png',
            'image': 'default.png',
            'text': 'N随机角色',
            'game_define': 'general',
        },
        'wxhakioi2019_2': {
            'name': 'WXH AK IOI 2019',
            'icon': 'orz.gif',
            'image': 'default.png',
            'text': 'N大佬wxh\nAKIOI2019',
            'game_define': 'general',
        },
        # 'lcaggctsc2018_2':{
        # 'name':'CommonAnts GG CTSC 2018',
        # 'icon':'commonants_icon.png',
        # 'image':'commonants_image.png',
        # 'text':'N蒟蒻CommonAnts\nGG CTSC 2018',
        # 'game_define':'general',
        # },
        # 'random_3':{
        # 'name':'RANDOM',
        # 'icon':'default.png',
        # 'image':'default.png',
        # 'text':'N随机角色',
        # 'game_define':'general',
        # },
        # 'wxhakioi2019_3':{
        # 'name':'WXH AK IOI 2019',
        # 'icon':'orz.gif',
        # 'image':'default.png',
        # 'text':'N大佬wxh\nAKIOI2019',
        # 'game_define':'general',
        # },
        # 'lcaggctsc2018_3':{
        # 'name':'CommonAnts GG CTSC 2018',
        # 'icon':'commonants_icon.png',
        # 'image':'commonants_image.png',
        # 'text':'N蒟蒻CommonAnts\nGG CTSC 2018',
        # 'game_define':'general',
        # }
    }
}
| 25.544444
| 129
| 0.563288
| 231
| 2,299
| 5.480519
| 0.151515
| 0.094787
| 0.161137
| 0.120063
| 0.847551
| 0.847551
| 0.847551
| 0.847551
| 0.847551
| 0.847551
| 0
| 0.058431
| 0.218356
| 2,299
| 89
| 130
| 25.831461
| 0.646077
| 0.274467
| 0
| 0.65
| 0
| 0
| 0.582957
| 0.07295
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2df6670e16b38de17f37fcb9d94c809ef0b6a3f3
| 69
|
py
|
Python
|
javascript/191128/test_191128.py
|
hbyyy/TIL
|
e89ae2913a8a38eb7f480a9ec2324c3ac11e309e
|
[
"MIT"
] | null | null | null |
javascript/191128/test_191128.py
|
hbyyy/TIL
|
e89ae2913a8a38eb7f480a9ec2324c3ac11e309e
|
[
"MIT"
] | 1
|
2022-03-26T07:50:54.000Z
|
2022-03-26T07:50:54.000Z
|
javascript/191128/test_191128.py
|
hbyyy/TIL
|
e89ae2913a8a38eb7f480a9ec2324c3ac11e309e
|
[
"MIT"
] | null | null | null |
# Small-integer identity demo: assignment copies references, not values.
c = 1
s = c
print(id(c), id(s))  # both names refer to the same int object here
s = 3
print(id(c), id(s))  # s was rebound to a different object; c is untouched
print(c, s)
| 11.5
| 19
| 0.507246
| 19
| 69
| 1.842105
| 0.315789
| 0.4
| 0.457143
| 0.571429
| 0.628571
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.202899
| 69
| 6
| 20
| 11.5
| 0.6
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
93561d34658c9f223845a9481025831986359090
| 13,849
|
py
|
Python
|
hyde/tests/test_plugin.py
|
dcode/hyde
|
7ce58157a9e74cc767cd602097441b8424a2052f
|
[
"MIT"
] | 1
|
2019-01-03T00:52:22.000Z
|
2019-01-03T00:52:22.000Z
|
hyde/tests/test_plugin.py
|
eliethesaiyan/hyde
|
7ce58157a9e74cc767cd602097441b8424a2052f
|
[
"MIT"
] | null | null | null |
hyde/tests/test_plugin.py
|
eliethesaiyan/hyde
|
7ce58157a9e74cc767cd602097441b8424a2052f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.exceptions import HydeException
from hyde.fs import File, Folder
from hyde.generator import Generator
from hyde.plugin import Plugin
from hyde.site import Site
from mock import patch
from nose.tools import raises, nottest, with_setup
# Scratch site folder ('_test', sibling of this module); built in
# TestPlugins.setup_class and removed in teardown_class.
TEST_SITE = File(__file__).parent.child_folder('_test')
class PluginLoaderStub(Plugin):
    """Do-nothing Plugin subclass whose callbacks the tests patch/observe."""
class NoReturnPlugin(Plugin):
def begin_text_resource(self, resource, text):
print "NoReturnPlugin"
return None
class ConstantReturnPlugin(Plugin):
def begin_text_resource(self, resource, text):
print "ConstantReturnPlugin"
return "Jam"
class TestPlugins(object):
    @classmethod
    def setup_class(cls):
        """Build the scratch site and snapshot its content paths by kind."""
        TEST_SITE.make()
        TEST_SITE.parent.child_folder('sites/test_jinja').copy_contents_to(TEST_SITE)
        folders = []
        text_files = []
        binary_files = []
        # Walk the content tree, recording folders and splitting files into
        # text vs. binary via the walker's visitor callbacks.
        with TEST_SITE.child_folder('content').walker as walker:
            @walker.folder_visitor
            def visit_folder(folder):
                folders.append(folder.path)

            @walker.file_visitor
            def visit_file(afile):
                if not afile.is_text:
                    binary_files.append(afile.path)
                else:
                    text_files.append(afile.path)
        # Sorted snapshots consumed by the per-node assertions in the tests.
        cls.content_nodes = sorted(folders)
        cls.content_text_resources = sorted(text_files)
        cls.content_binary_resources = sorted(binary_files)
    @classmethod
    def teardown_class(cls):
        """Delete the scratch site created by setup_class."""
        TEST_SITE.delete()
    def setUp(self):
        # Fresh Site per test, configured to load only the stub plugin.
        self.site = Site(TEST_SITE)
        self.site.config.plugins = ['hyde.tests.test_plugin.PluginLoaderStub']
def test_can_load_plugin_modules(self):
assert not len(self.site.plugins)
Plugin.load_all(self.site)
assert len(self.site.plugins) == 1
assert self.site.plugins[0].__class__.__name__ == 'PluginLoaderStub'
def test_generator_loads_plugins(self):
gen = Generator(self.site)
assert len(self.site.plugins) == 1
def test_generator_template_registered_called(self):
with patch.object(PluginLoaderStub, 'template_loaded') as template_loaded_stub:
gen = Generator(self.site)
gen.generate_all()
assert template_loaded_stub.call_count == 1
def test_generator_template_begin_generation_called(self):
with patch.object(PluginLoaderStub, 'begin_generation') as begin_generation_stub:
gen = Generator(self.site)
gen.generate_all()
assert begin_generation_stub.call_count == 1
def test_generator_template_begin_generation_called_for_single_resource(self):
with patch.object(PluginLoaderStub, 'begin_generation') as begin_generation_stub:
gen = Generator(self.site)
path = self.site.content.source_folder.child('about.html')
gen.generate_resource_at_path(path)
assert begin_generation_stub.call_count == 1
def test_generator_template_begin_generation_called_for_single_node(self):
with patch.object(PluginLoaderStub, 'begin_generation') as begin_generation_stub:
gen = Generator(self.site)
path = self.site.content.source_folder
gen.generate_node_at_path(path)
assert begin_generation_stub.call_count == 1
def test_generator_template_generation_complete_called(self):
with patch.object(PluginLoaderStub, 'generation_complete') as generation_complete_stub:
gen = Generator(self.site)
gen.generate_all()
assert generation_complete_stub.call_count == 1
def test_generator_template_generation_complete_called_for_single_resource(self):
with patch.object(PluginLoaderStub, 'generation_complete') as generation_complete_stub:
gen = Generator(self.site)
path = self.site.content.source_folder.child('about.html')
gen.generate_resource_at_path(path)
assert generation_complete_stub.call_count == 1
def test_generator_template_generation_complete_called_for_single_node(self):
with patch.object(PluginLoaderStub, 'generation_complete') as generation_complete_stub:
gen = Generator(self.site)
path = self.site.content.source_folder
gen.generate_node_at_path(path)
assert generation_complete_stub.call_count == 1
def test_generator_template_begin_site_called(self):
with patch.object(PluginLoaderStub, 'begin_site') as begin_site_stub:
gen = Generator(self.site)
gen.generate_all()
assert begin_site_stub.call_count == 1
def test_generator_template_begin_site_called_for_single_resource(self):
with patch.object(PluginLoaderStub, 'begin_site') as begin_site_stub:
gen = Generator(self.site)
path = self.site.content.source_folder.child('about.html')
gen.generate_resource_at_path(path)
assert begin_site_stub.call_count == 1
def test_generator_template_begin_site_not_called_for_single_resource_second_time(self):
with patch.object(PluginLoaderStub, 'begin_site') as begin_site_stub:
gen = Generator(self.site)
gen.generate_all()
assert begin_site_stub.call_count == 1
path = self.site.content.source_folder.child('about.html')
gen.generate_resource_at_path(path)
assert begin_site_stub.call_count == 1
def test_generator_template_begin_site_called_for_single_node(self):
with patch.object(PluginLoaderStub, 'begin_site') as begin_site_stub:
gen = Generator(self.site)
path = self.site.content.source_folder
gen.generate_node_at_path(path)
assert begin_site_stub.call_count == 1
def test_generator_template_begin_site_not_called_for_single_node_second_time(self):
with patch.object(PluginLoaderStub, 'begin_site') as begin_site_stub:
gen = Generator(self.site)
gen.generate_all()
assert begin_site_stub.call_count == 1
path = self.site.content.source_folder
gen.generate_node_at_path(path)
assert begin_site_stub.call_count == 1
def test_generator_template_site_complete_called(self):
with patch.object(PluginLoaderStub, 'site_complete') as site_complete_stub:
gen = Generator(self.site)
gen.generate_all()
assert site_complete_stub.call_count == 1
def test_generator_template_site_complete_called_for_single_resource(self):
with patch.object(PluginLoaderStub, 'site_complete') as site_complete_stub:
gen = Generator(self.site)
path = self.site.content.source_folder.child('about.html')
gen.generate_resource_at_path(path)
assert site_complete_stub.call_count == 1
def test_generator_template_site_complete_not_called_for_single_resource_second_time(self):
with patch.object(PluginLoaderStub, 'site_complete') as site_complete_stub:
gen = Generator(self.site)
gen.generate_all()
assert site_complete_stub.call_count == 1
path = self.site.content.source_folder.child('about.html')
gen.generate_resource_at_path(path)
assert site_complete_stub.call_count == 1
def test_generator_template_site_complete_called_for_single_node(self):
with patch.object(PluginLoaderStub, 'site_complete') as site_complete_stub:
gen = Generator(self.site)
path = self.site.content.source_folder
gen.generate_node_at_path(path)
assert site_complete_stub.call_count == 1
def test_generator_template_site_complete_not_called_for_single_node_second_time(self):
with patch.object(PluginLoaderStub, 'site_complete') as site_complete_stub:
gen = Generator(self.site)
gen.generate_all()
path = self.site.content.source_folder
gen.generate_node_at_path(path)
assert site_complete_stub.call_count == 1
def test_generator_template_begin_node_called(self):
    # begin_node fires once per content node, and receives each node.
    with patch.object(PluginLoaderStub, 'begin_node') as stub:
        Generator(self.site).generate_all()
        assert stub.call_count == len(self.content_nodes)
        seen = sorted(call[0][0].path for call in stub.call_args_list)
        assert seen == self.content_nodes
def test_generator_template_begin_node_called_for_single_resource(self):
    # A fresh generator walks every node even for one resource, so
    # begin_node fires once per node.
    with patch.object(PluginLoaderStub, 'begin_node') as stub:
        gen = Generator(self.site)
        about = self.site.content.source_folder.child('about.html')
        gen.generate_resource_at_path(about)
        assert stub.call_count == len(self.content_nodes)
def test_generator_template_begin_node_not_called_for_single_resource_second_time(self):
    # Regenerating one resource after a full build adds no extra
    # begin_node calls.
    with patch.object(PluginLoaderStub, 'begin_node') as stub:
        gen = Generator(self.site)
        gen.generate_all()
        assert stub.call_count == len(self.content_nodes)
        about = self.site.content.source_folder.child('about.html')
        gen.generate_resource_at_path(about)
        assert stub.call_count == len(self.content_nodes)  # no extra calls
def test_generator_template_node_complete_called(self):
    # node_complete fires once per content node, and receives each node.
    with patch.object(PluginLoaderStub, 'node_complete') as stub:
        Generator(self.site).generate_all()
        assert stub.call_count == len(self.content_nodes)
        seen = sorted(call[0][0].path for call in stub.call_args_list)
        assert seen == self.content_nodes
def test_generator_template_node_complete_called_for_single_resource(self):
    # A fresh generator completes every node even for one resource.
    with patch.object(PluginLoaderStub, 'node_complete') as stub:
        gen = Generator(self.site)
        about = self.site.content.source_folder.child('about.html')
        gen.generate_resource_at_path(about)
        assert stub.call_count == len(self.content_nodes)
def test_generator_template_node_complete_not_called_for_single_resource_second_time(self):
    # Regenerating one resource after a full build adds no extra
    # node_complete calls.
    with patch.object(PluginLoaderStub, 'node_complete') as stub:
        gen = Generator(self.site)
        gen.generate_all()
        assert stub.call_count == len(self.content_nodes)
        about = self.site.content.source_folder.child('about.html')
        gen.generate_resource_at_path(about)
        assert stub.call_count == len(self.content_nodes)  # no extra calls
def test_generator_template_begin_text_resource_called(self):
    # Every text resource passes through begin_text_resource during a
    # full build.
    with patch.object(PluginLoaderStub, 'begin_text_resource') as stub:
        stub.reset_mock()
        stub.return_value = ''
        Generator(self.site).generate_all()
        seen = sorted(call[0][0].path for call in stub.call_args_list)
        assert set(seen) == set(self.content_text_resources)
def test_generator_template_begin_text_resource_called_for_single_resource(self):
    # Incrementally regenerating one text resource invokes the hook for
    # just that resource.
    with patch.object(PluginLoaderStub, 'begin_text_resource') as stub:
        stub.return_value = ''
        gen = Generator(self.site)
        gen.generate_all()
        stub.reset_mock()
        path = self.site.content.source_folder.child('about.html')
        gen = Generator(self.site)
        gen.generate_resource_at_path(path, incremental=True)
        seen = sorted(call[0][0].path for call in stub.call_args_list)
        assert stub.call_count == 1
        assert seen[0] == path
def test_generator_template_begin_binary_resource_called(self):
    # Every binary resource passes through begin_binary_resource during
    # a full build.
    with patch.object(PluginLoaderStub, 'begin_binary_resource') as stub:
        Generator(self.site).generate_all()
        seen = sorted(call[0][0].path for call in stub.call_args_list)
        assert stub.call_count == len(self.content_binary_resources)
        assert seen == self.content_binary_resources
def test_generator_template_begin_binary_resource_called_for_single_resource(self):
    # Regenerating one binary resource invokes the hook for just that
    # resource.
    with patch.object(PluginLoaderStub, 'begin_binary_resource') as stub:
        gen = Generator(self.site)
        gen.generate_all()
        stub.reset_mock()
        path = self.site.content.source_folder.child('favicon.ico')
        gen.generate_resource_at_path(path)
        seen = sorted(call[0][0].path for call in stub.call_args_list)
        assert stub.call_count == 1
        assert seen[0] == path
def test_plugin_chaining(self):
    # ConstantReturnPlugin's output must flow into NoReturnPlugin; the
    # deployed file then carries the constant text.
    self.site.config.plugins = [
        'hyde.tests.test_plugin.ConstantReturnPlugin',
        'hyde.tests.test_plugin.NoReturnPlugin'
    ]
    gen = Generator(self.site)
    about_source = self.site.content.source_folder.child('about.html')
    gen.generate_resource_at_path(about_source)
    deployed = File(Folder(
        self.site.config.deploy_root_path).child('about.html'))
    assert deployed.read_all() == "Jam"
| 41.713855
| 113
| 0.695357
| 1,718
| 13,849
| 5.232247
| 0.072177
| 0.051619
| 0.044833
| 0.066748
| 0.835799
| 0.832684
| 0.819891
| 0.81622
| 0.784069
| 0.765269
| 0
| 0.003722
| 0.223915
| 13,849
| 331
| 114
| 41.839879
| 0.83262
| 0.003683
| 0
| 0.617284
| 0
| 0
| 0.052972
| 0.011715
| 0
| 0
| 0
| 0
| 0.17284
| 0
| null | null | 0.004115
| 0.028807
| null | null | 0.00823
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fa8253100bced21da04b8b4e4d2bdcd1ac5711d7
| 180
|
py
|
Python
|
lyricsearch/systemcheck.py
|
wmcooper2/lyricsearch
|
0aff7a32d240f6ba2ba1e21ae46d3ce79d13edd5
|
[
"MIT"
] | null | null | null |
lyricsearch/systemcheck.py
|
wmcooper2/lyricsearch
|
0aff7a32d240f6ba2ba1e21ae46d3ce79d13edd5
|
[
"MIT"
] | null | null | null |
lyricsearch/systemcheck.py
|
wmcooper2/lyricsearch
|
0aff7a32d240f6ba2ba1e21ae46d3ce79d13edd5
|
[
"MIT"
] | null | null | null |
"""System checking module."""
# stand lib
import os
def ismac() -> bool:
    """Return True when running on macOS (the Darwin kernel)."""
    sysname = os.uname().sysname
    return sysname == "Darwin"
def ispi() -> bool:
    """Return True when running on Linux.

    NOTE(review): this checks for any Linux kernel, not specifically a
    Raspberry Pi — the name presumably reflects the deployment target.
    """
    sysname = os.uname().sysname
    return sysname == "Linux"
| 15
| 41
| 0.611111
| 23
| 180
| 4.782609
| 0.695652
| 0.181818
| 0.218182
| 0.309091
| 0.436364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 180
| 11
| 42
| 16.363636
| 0.763889
| 0.188889
| 0
| 0
| 0
| 0
| 0.078571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
faa0dafc76f68fab40c3ef872a6bdbb4f8baedb1
| 3,298
|
py
|
Python
|
acats/make_class_by_csv/script.py
|
lesywix/ACATS_parser
|
ae8524f9486208eccb5e58e840b43159f3c5eb78
|
[
"MIT"
] | null | null | null |
acats/make_class_by_csv/script.py
|
lesywix/ACATS_parser
|
ae8524f9486208eccb5e58e840b43159f3c5eb78
|
[
"MIT"
] | null | null | null |
acats/make_class_by_csv/script.py
|
lesywix/ACATS_parser
|
ae8524f9486208eccb5e58e840b43159f3c5eb78
|
[
"MIT"
] | null | null | null |
import csv
"""
Use Tabula (https://github.com/tabulapdf/tabula) to generate a CSV from the
PDF, then run this script to generate the class definitions and adjust them
manually.
"""
def make_mt_class_str():
    """Print MROTransferOutputNN class definitions built from mro_mt.csv.

    Reads the Tabula-extracted CSV row by row and emits one class per
    'Record Type' header row, one Field per named data row.
    NOTE(review): leading whitespace inside the generated-source string
    literals may have been collapsed upstream — confirm indent width.
    """
    make_class_str = ''
    count = 0
    with open('make_class_by_csv/mro_mt.csv') as f:
        r = csv.reader(f)
        for i in r:
            key = i[0]
            if key == 'Record Type':
                # A new record starts: open a new class.
                filler_count = 0
                count += 1
                class_name = f"\n\nclass MROTransferOutput{count:02}(BaseRecord):\n record_type = Field({i[2]}, {i[3]}, default='M')\n"
                make_class_str += class_name
            elif key == 'FIELD NAME':
                continue  # column-header row
            elif key != '':
                if i[2] == '' or i[3] == '':
                    # Continuation row: the field name wrapped onto a new
                    # CSV line — splice it into the previous field's name.
                    make_class_str = make_class_str.strip()
                    make_class_str, n, replace = make_class_str.rpartition('\n')
                    replace = replace.replace(' = ', f'_{key.replace(" ", "_").lower()} = ')
                    make_class_str = make_class_str + n + replace + '\n'
                    continue
                if key == 'Record Subtype':
                    make_class_str += f" record_subtype = Field({i[2]}, {i[3]}, default='T')\n"
                    continue
                # Guard the int conversion, matching make_ma_class_str:
                # dirty PDF extractions can leave non-numeric cells here,
                # which previously crashed with ValueError.
                try:
                    i[2], i[3] = int(i[2]), int(i[3])
                except Exception:
                    pass
                if key == 'Filler':
                    filler_count += 1
                    make_class_str += f" f{filler_count} = Field{i[2], i[3]}\n"
                    continue
                make_class_str += f" {i[0].replace(' ', '_').replace('/', '_or_').lower()} = Field{i[2], i[3]}\n"
    print(make_class_str)
def make_ma_class_str():
    """Print MROAssetOutputNN class definitions built from mro_ma.csv.

    Mirrors make_mt_class_str but targets the asset-record CSV and uses
    subtype default 'A'.
    """
    make_class_str = ''
    count = 0
    with open('make_class_by_csv/mro_ma.csv') as f:
        for row in csv.reader(f):
            key = row[0]
            if key == 'Record Type':
                # New record: start a fresh class definition.
                filler_count = 0
                count += 1
                make_class_str += f"\n\nclass MROAssetOutput{count:02}(BaseRecord):\n record_type = Field({row[2]}, {row[3]}, default='M')\n"
            elif key == 'FIELD NAME':
                continue  # column-header row
            elif key != '':
                if row[2] == '' or row[3] == '':
                    # Wrapped field name: merge it into the last emitted
                    # attribute's identifier.
                    make_class_str = make_class_str.strip()
                    make_class_str, sep, last_line = make_class_str.rpartition('\n')
                    last_line = last_line.replace(' = ', f'_{key.replace(" ", "_").lower()} = ')
                    make_class_str = make_class_str + sep + last_line + '\n'
                    continue
                if key == 'Record Subtype':
                    make_class_str += f" record_subtype = Field({row[2]}, {row[3]}, default='A')\n"
                    continue
                try:
                    row[2], row[3] = int(row[2]), int(row[3])
                except Exception:
                    pass  # keep raw strings when cells are not clean ints
                if key == 'Filler':
                    filler_count += 1
                    make_class_str += f" f{filler_count} = Field{row[2], row[3]}\n"
                    continue
                make_class_str += f" {row[0].replace(' ', '_').replace('/', '_or_').lower()} = Field{row[2], row[3]}\n"
    print(make_class_str)
# Script entry point: emit the MA record classes (MT variant kept for
# reference, run it by swapping the comment).
if __name__ == '__main__':
# make_mt_class_str()
make_ma_class_str()
| 38.8
| 138
| 0.463614
| 403
| 3,298
| 3.545906
| 0.181141
| 0.156753
| 0.20154
| 0.027992
| 0.841148
| 0.824353
| 0.824353
| 0.824353
| 0.824353
| 0.824353
| 0
| 0.021847
| 0.389327
| 3,298
| 84
| 139
| 39.261905
| 0.687686
| 0.005761
| 0
| 0.794118
| 1
| 0.058824
| 0.25287
| 0.043686
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0.014706
| 0.014706
| 0
| 0.044118
| 0.029412
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
87835f2a3feaaab836342f293a188c4089b8fef9
| 120,532
|
py
|
Python
|
tiled-lutnet/training-software/microarchitectures/63lutnet.py
|
awai54st/LUTNet
|
81b044f31d1131bee1a7fae41fc4d2fb102ea73a
|
[
"BSD-2-Clause"
] | 38
|
2019-10-28T10:06:33.000Z
|
2022-02-21T21:38:39.000Z
|
tiled-lutnet/training-software/microarchitectures/63lutnet.py
|
awai54st/LUTNet
|
81b044f31d1131bee1a7fae41fc4d2fb102ea73a
|
[
"BSD-2-Clause"
] | null | null | null |
tiled-lutnet/training-software/microarchitectures/63lutnet.py
|
awai54st/LUTNet
|
81b044f31d1131bee1a7fae41fc4d2fb102ea73a
|
[
"BSD-2-Clause"
] | 13
|
2019-10-28T10:17:48.000Z
|
2021-08-10T21:37:11.000Z
|
import numpy as np
import pickle
import matplotlib.pyplot as plt
import matplotlib
import tensorflow as tf
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Convolution2D, Activation, Flatten, MaxPooling2D,Input,Dropout,GlobalAveragePooling2D
from keras import backend as K
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.optimizers import SGD
from keras.engine.topology import Layer
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
import os
from keras.layers.normalization import BatchNormalization
from tensorflow.python.framework import ops
#from multi_gpu import make_parallel
def binarize(x):
    """Element-wise sign with a straight-through gradient estimator.

    Forward pass yields sign(clip(x, -1, 1)); stop_gradient routes the
    backward pass through the clipped identity instead, so gradients
    flow fully (trick from http://stackoverflow.com/a/36480182).
    """
    clipped = K.clip(x, -1, 1)
    return clipped + K.stop_gradient(K.sign(clipped) - clipped)
class Residual_sign(Layer):
    """Residual binarisation activation layer.

    Approximates its input with `levels` successive binary terms, each
    scaled by a trainable per-level factor; supports levels 1 to 3.
    With levels > 1 the per-level outputs are stacked along a new
    leading axis.
    """

    def __init__(self, levels=1, trainable=True, **kwargs):
        self.levels = levels
        self.trainable = trainable
        super(Residual_sign, self).__init__(**kwargs)

    def build(self, input_shape):
        # Initialise scaling factors to a decreasing, normalised ramp:
        # (levels, levels-1, ..., 1) / sum.
        ramp = np.arange(self.levels) + 1.0
        ramp = ramp[::-1]
        initial = ramp / np.sum(ramp)
        # Trainable scaling factors for residual binarisation.
        self.means = self.add_weight(
            name='means',
            shape=(self.levels, ),
            initializer=keras.initializers.Constant(value=initial),
            trainable=self.trainable)

    def call(self, x, mask=None):
        resid = x
        out_bin = 0
        if self.levels == 1:
            for l in range(self.levels):
                out = binarize(resid) * abs(self.means[l])
                out_bin = out_bin + out  # no gamma per level
                resid = resid - out
        elif self.levels == 2:
            out = binarize(resid) * abs(self.means[0])
            out_bin = out
            resid = resid - out
            out = binarize(resid) * abs(self.means[1])
            out_bin = tf.stack([out_bin, out])
            resid = resid - out
        elif self.levels == 3:
            out = binarize(resid) * abs(self.means[0])
            level_one = out
            resid = resid - out
            out = binarize(resid) * abs(self.means[1])
            level_two = out
            resid = resid - out
            out = binarize(resid) * abs(self.means[2])
            level_three = out
            resid = resid - out
            out_bin = tf.stack([level_one, level_two, level_three])
        return out_bin

    def get_output_shape_for(self, input_shape):
        # levels > 1 prepends a stacking axis.
        if self.levels == 1:
            return input_shape
        return (self.levels, input_shape)

    def compute_output_shape(self, input_shape):
        if self.levels == 1:
            return input_shape
        return (self.levels, input_shape)

    def set_means(self, X):
        # Fit per-level scaling factors to sample data X by greedy
        # residual matching, normalise, then push into the variable.
        means = np.zeros((self.levels))
        means[0] = 1
        resid = np.clip(X, -1, 1)
        approx = 0
        for l in range(self.levels):
            m = np.mean(np.absolute(resid))
            out = np.sign(resid) * m
            approx = approx + out
            resid = resid - out
            means[l] = m
        # Reconstruction error kept as an (unused) diagnostic.
        err = np.mean((approx - np.clip(X, -1, 1)) ** 2)
        means = means / np.sum(means)
        sess = K.get_session()
        sess.run(self.means.assign(means))
class binary_conv(Layer):
def __init__(self, nfilters, ch_in, k, padding, strides=(1, 1), levels=1,
             pruning_prob=0, first_layer=False, LUT=True, BINARY=True,
             TRC=1, TM=1, TN=1, **kwargs):
    """Tiled binary/LUT convolution layer configuration.

    k x k kernel over ch_in channels producing nfilters outputs; TRC/TM/TN
    are the tiling factors along kernel, input-channel and output-channel
    dimensions respectively.
    """
    self.nfilters = nfilters
    self.ch_in = ch_in
    self.k = k
    self.padding = padding
    # tf uses upper-case padding notation whereas keras uses lower-case.
    if padding == 'valid':
        self.PADDING = "VALID"
    elif padding == 'same':
        self.PADDING = "SAME"
    self.strides = strides
    self.levels = levels
    self.first_layer = first_layer
    self.LUT = LUT
    self.BINARY = BINARY
    self.window_size = self.ch_in * self.k * self.k
    self.TRC = TRC
    self.TM = TM
    self.TN = TN
    # Per-tile weight shape after dividing each dimension by its tiling
    # factor. NOTE(review): uses '/', so Python 3 would yield floats here
    # — presumably Python 2 integer division is assumed; confirm.
    self.tile_size = [self.k / self.TRC, self.k / self.TRC,
                      self.ch_in / self.TM, self.nfilters / self.TN]
    super(binary_conv, self).__init__(**kwargs)
def build(self, input_shape):
    """Create the layer's variables.

    Adds frozen randomisation maps, the gamma scale, the pruning mask,
    and — depending on `levels`/`LUT`/`first_layer` — either a single
    weight tensor or the LUT coefficient set (c1..c64 plus w1..w3 for
    levels==2, w1..w8 for levels==3).

    The 64 near-identical `cN = ...` / `self.cN = K.variable(cN)` lines
    of the original are collapsed into loops with setattr; attribute
    names and the order of `trainable_weights` are preserved.
    """
    tile_len = self.tile_size[0] * self.tile_size[1] * self.tile_size[2]

    def _rand_map(name, size):
        # Frozen randomisation map for subsequent input connections.
        return self.add_weight(
            name=name,
            shape=(size, 1),
            initializer=keras.initializers.Constant(
                value=np.random.randint(size, size=[size, 1])),
            trainable=False)

    self.rand_map_0 = _rand_map('rand_map_0', tile_len)
    self.rand_map_1 = _rand_map('rand_map_1', tile_len)
    self.rand_map_exp_0 = _rand_map('rand_map_exp_0', self.window_size)
    self.rand_map_exp_1 = _rand_map('rand_map_exp_1', self.window_size)

    stdv = 1 / np.sqrt(self.k * self.k * self.ch_in)
    self.gamma = K.variable(1.0)

    def _full_w():
        # Full-size weight tensor initialiser, N(0, stdv).
        return np.random.normal(
            loc=0.0, scale=stdv,
            size=[self.k, self.k, self.ch_in, self.nfilters]
        ).astype(np.float32)

    def _tile_c():
        # Per-tile LUT coefficient initialiser, N(0, stdv).
        return np.random.normal(
            loc=0.0, scale=stdv, size=self.tile_size).astype(np.float32)

    if self.levels == 1 or self.first_layer == True:
        self.w = K.variable(_full_w())
        self.trainable_weights = [self.w, self.gamma]
    elif self.levels == 2:
        if self.LUT == True:
            # 64 LUT coefficient tensors self.c1..self.c64.
            c_vars = []
            for idx in range(1, 65):
                c_var = K.variable(_tile_c())
                setattr(self, 'c%d' % idx, c_var)
                c_vars.append(c_var)
            self.w1 = K.variable(_full_w())
            self.w2 = K.variable(_full_w())
            self.w3 = K.variable(_full_w())
            self.trainable_weights = c_vars + [self.w1, self.w2, self.w3,
                                               self.gamma]
        else:
            self.w = K.variable(_full_w())
            self.trainable_weights = [self.w, self.gamma]
    elif self.levels == 3:
        if self.LUT == True:
            # Eight full-size weight tensors self.w1..self.w8.
            w_vars = []
            for idx in range(1, 9):
                w_var = K.variable(_full_w())
                setattr(self, 'w%d' % idx, w_var)
                w_vars.append(w_var)
            self.trainable_weights = w_vars + [self.gamma]
        else:
            self.w = K.variable(_full_w())
            self.trainable_weights = [self.w, self.gamma]

    # LUT pruning based on whether inputs get repeated; ones keep the
    # corresponding LUT input.
    self.pruning_mask = self.add_weight(
        name='pruning_mask',
        shape=(tile_len, self.tile_size[3]),
        initializer=keras.initializers.Constant(
            value=np.ones((tile_len, self.tile_size[3]))),
        trainable=False)
def call(self, x,mask=None):
constraint_gamma=K.abs(self.gamma)#K.clip(self.gamma,0.01,10)
if self.levels==1 or self.first_layer==True:
if self.BINARY==False:
self.clamped_w=constraint_gamma*K.clip(self.w,-1,1)
else:
self.clamped_w=constraint_gamma*binarize(self.w)
elif self.levels==2:
if self.LUT==True:
if self.BINARY==False:
self.clamped_w1 =K.clip(self.w1,-1,1)
self.clamped_w2 =K.clip(self.w2,-1,1)
self.clamped_w3 =K.clip(self.w3,-1,1)
self.clamped_c1 =constraint_gamma*K.clip(tf.tile(self.c1, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c2 =constraint_gamma*K.clip(tf.tile(self.c2, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c3 =constraint_gamma*K.clip(tf.tile(self.c3, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c4 =constraint_gamma*K.clip(tf.tile(self.c4, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c5 =constraint_gamma*K.clip(tf.tile(self.c5, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c6 =constraint_gamma*K.clip(tf.tile(self.c6, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c7 =constraint_gamma*K.clip(tf.tile(self.c7, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c8 =constraint_gamma*K.clip(tf.tile(self.c8, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c9 =constraint_gamma*K.clip(tf.tile(self.c9, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c10=constraint_gamma*K.clip(tf.tile(self.c10, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c11=constraint_gamma*K.clip(tf.tile(self.c11, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c12=constraint_gamma*K.clip(tf.tile(self.c12, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c13=constraint_gamma*K.clip(tf.tile(self.c13, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c14=constraint_gamma*K.clip(tf.tile(self.c14, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c15=constraint_gamma*K.clip(tf.tile(self.c15, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c16=constraint_gamma*K.clip(tf.tile(self.c16, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c17=constraint_gamma*K.clip(tf.tile(self.c17, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c18=constraint_gamma*K.clip(tf.tile(self.c18, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c19=constraint_gamma*K.clip(tf.tile(self.c19, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c20=constraint_gamma*K.clip(tf.tile(self.c20, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c21=constraint_gamma*K.clip(tf.tile(self.c21, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c22=constraint_gamma*K.clip(tf.tile(self.c22, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c23=constraint_gamma*K.clip(tf.tile(self.c23, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c24=constraint_gamma*K.clip(tf.tile(self.c24, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c25=constraint_gamma*K.clip(tf.tile(self.c25, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c26=constraint_gamma*K.clip(tf.tile(self.c26, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c27=constraint_gamma*K.clip(tf.tile(self.c27, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c28=constraint_gamma*K.clip(tf.tile(self.c28, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c29=constraint_gamma*K.clip(tf.tile(self.c29, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c30=constraint_gamma*K.clip(tf.tile(self.c30, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c31=constraint_gamma*K.clip(tf.tile(self.c31, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c32=constraint_gamma*K.clip(tf.tile(self.c32, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c33=constraint_gamma*K.clip(tf.tile(self.c33, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c34=constraint_gamma*K.clip(tf.tile(self.c34, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c35=constraint_gamma*K.clip(tf.tile(self.c35, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c36=constraint_gamma*K.clip(tf.tile(self.c36, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c37=constraint_gamma*K.clip(tf.tile(self.c37, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c38=constraint_gamma*K.clip(tf.tile(self.c38, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c39=constraint_gamma*K.clip(tf.tile(self.c39, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c40=constraint_gamma*K.clip(tf.tile(self.c40, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c41=constraint_gamma*K.clip(tf.tile(self.c41, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c42=constraint_gamma*K.clip(tf.tile(self.c42, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c43=constraint_gamma*K.clip(tf.tile(self.c43, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c44=constraint_gamma*K.clip(tf.tile(self.c44, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c45=constraint_gamma*K.clip(tf.tile(self.c45, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c46=constraint_gamma*K.clip(tf.tile(self.c46, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c47=constraint_gamma*K.clip(tf.tile(self.c47, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c48=constraint_gamma*K.clip(tf.tile(self.c48, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c49=constraint_gamma*K.clip(tf.tile(self.c49, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c50=constraint_gamma*K.clip(tf.tile(self.c50, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c51=constraint_gamma*K.clip(tf.tile(self.c51, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c52=constraint_gamma*K.clip(tf.tile(self.c52, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c53=constraint_gamma*K.clip(tf.tile(self.c53, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c54=constraint_gamma*K.clip(tf.tile(self.c54, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c55=constraint_gamma*K.clip(tf.tile(self.c55, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c56=constraint_gamma*K.clip(tf.tile(self.c56, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c57=constraint_gamma*K.clip(tf.tile(self.c57, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c58=constraint_gamma*K.clip(tf.tile(self.c58, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c59=constraint_gamma*K.clip(tf.tile(self.c59, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c60=constraint_gamma*K.clip(tf.tile(self.c60, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c61=constraint_gamma*K.clip(tf.tile(self.c61, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c62=constraint_gamma*K.clip(tf.tile(self.c62, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c63=constraint_gamma*K.clip(tf.tile(self.c63, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
self.clamped_c64=constraint_gamma*K.clip(tf.tile(self.c64, [self.TRC,self.TRC,self.TM,self.TN]),-1,1)
else:
self.clamped_w1 =binarize(self.w1)
self.clamped_w2 =binarize(self.w2)
self.clamped_w3 =binarize(self.w3)
self.clamped_c1 =constraint_gamma*binarize(tf.tile(self.c1, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c2 =constraint_gamma*binarize(tf.tile(self.c2, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c3 =constraint_gamma*binarize(tf.tile(self.c3, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c4 =constraint_gamma*binarize(tf.tile(self.c4, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c5 =constraint_gamma*binarize(tf.tile(self.c5, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c6 =constraint_gamma*binarize(tf.tile(self.c6, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c7 =constraint_gamma*binarize(tf.tile(self.c7, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c8 =constraint_gamma*binarize(tf.tile(self.c8, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c9 =constraint_gamma*binarize(tf.tile(self.c9, [self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c10=constraint_gamma*binarize(tf.tile(self.c10,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c11=constraint_gamma*binarize(tf.tile(self.c11,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c12=constraint_gamma*binarize(tf.tile(self.c12,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c13=constraint_gamma*binarize(tf.tile(self.c13,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c14=constraint_gamma*binarize(tf.tile(self.c14,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c15=constraint_gamma*binarize(tf.tile(self.c15,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c16=constraint_gamma*binarize(tf.tile(self.c16,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c17=constraint_gamma*binarize(tf.tile(self.c17,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c18=constraint_gamma*binarize(tf.tile(self.c18,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c19=constraint_gamma*binarize(tf.tile(self.c19,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c20=constraint_gamma*binarize(tf.tile(self.c20,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c21=constraint_gamma*binarize(tf.tile(self.c21,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c22=constraint_gamma*binarize(tf.tile(self.c22,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c23=constraint_gamma*binarize(tf.tile(self.c23,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c24=constraint_gamma*binarize(tf.tile(self.c24,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c25=constraint_gamma*binarize(tf.tile(self.c25,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c26=constraint_gamma*binarize(tf.tile(self.c26,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c27=constraint_gamma*binarize(tf.tile(self.c27,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c28=constraint_gamma*binarize(tf.tile(self.c28,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c29=constraint_gamma*binarize(tf.tile(self.c29,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c30=constraint_gamma*binarize(tf.tile(self.c30,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c31=constraint_gamma*binarize(tf.tile(self.c31,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c32=constraint_gamma*binarize(tf.tile(self.c32,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c33=constraint_gamma*binarize(tf.tile(self.c33,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c34=constraint_gamma*binarize(tf.tile(self.c34,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c35=constraint_gamma*binarize(tf.tile(self.c35,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c36=constraint_gamma*binarize(tf.tile(self.c36,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c37=constraint_gamma*binarize(tf.tile(self.c37,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c38=constraint_gamma*binarize(tf.tile(self.c38,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c39=constraint_gamma*binarize(tf.tile(self.c39,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c40=constraint_gamma*binarize(tf.tile(self.c40,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c41=constraint_gamma*binarize(tf.tile(self.c41,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c42=constraint_gamma*binarize(tf.tile(self.c42,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c43=constraint_gamma*binarize(tf.tile(self.c43,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c44=constraint_gamma*binarize(tf.tile(self.c44,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c45=constraint_gamma*binarize(tf.tile(self.c45,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c46=constraint_gamma*binarize(tf.tile(self.c46,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c47=constraint_gamma*binarize(tf.tile(self.c47,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c48=constraint_gamma*binarize(tf.tile(self.c48,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c49=constraint_gamma*binarize(tf.tile(self.c49,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c50=constraint_gamma*binarize(tf.tile(self.c50,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c51=constraint_gamma*binarize(tf.tile(self.c51,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c52=constraint_gamma*binarize(tf.tile(self.c52,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c53=constraint_gamma*binarize(tf.tile(self.c53,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c54=constraint_gamma*binarize(tf.tile(self.c54,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c55=constraint_gamma*binarize(tf.tile(self.c55,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c56=constraint_gamma*binarize(tf.tile(self.c56,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c57=constraint_gamma*binarize(tf.tile(self.c57,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c58=constraint_gamma*binarize(tf.tile(self.c58,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c59=constraint_gamma*binarize(tf.tile(self.c59,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c60=constraint_gamma*binarize(tf.tile(self.c60,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c61=constraint_gamma*binarize(tf.tile(self.c61,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c62=constraint_gamma*binarize(tf.tile(self.c62,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c63=constraint_gamma*binarize(tf.tile(self.c63,[self.TRC,self.TRC,self.TM,self.TN]))
self.clamped_c64=constraint_gamma*binarize(tf.tile(self.c64,[self.TRC,self.TRC,self.TM,self.TN]))
else:
if self.BINARY==False:
self.clamped_w=constraint_gamma*K.clip(self.w,-1,1)
else:
self.clamped_w=constraint_gamma*binarize(self.w)
elif self.levels==3:
if self.LUT==True:
self.clamped_w1=constraint_gamma*binarize(self.w1)
self.clamped_w2=constraint_gamma*binarize(self.w2)
self.clamped_w3=constraint_gamma*binarize(self.w3)
self.clamped_w4=constraint_gamma*binarize(self.w4)
self.clamped_w5=constraint_gamma*binarize(self.w5)
self.clamped_w6=constraint_gamma*binarize(self.w6)
self.clamped_w7=constraint_gamma*binarize(self.w7)
self.clamped_w8=constraint_gamma*binarize(self.w8)
else:
self.clamped_w=constraint_gamma*binarize(self.w)
# if self.levels==1:#train baseline with no resid gamma scaling
# self.clamped_w=constraint_gamma*binarize(self.w)
# #self.clamped_w=binarize(self.w)#no gamma per weight channel
# elif self.levels==2:
# self.clamped_w=constraint_gamma*binarize(self.w)
if keras.__version__[0]=='2':
if self.levels==1 or self.first_layer==True:
self.out=K.conv2d(x, kernel=self.clamped_w*tf.tile(tf.reshape(self.pruning_mask, self.tile_size), [self.TRC,self.TRC,self.TM,self.TN]), padding=self.padding,strides=self.strides )
elif self.levels==2:
if self.LUT==True:
x0_patches = tf.extract_image_patches(x[0,:,:,:,:],
[1, self.k, self.k, 1],
[1, self.strides[0], self.strides[1], 1], [1, 1, 1, 1],
padding=self.PADDING)
x1_patches = tf.extract_image_patches(x[1,:,:,:,:],
[1, self.k, self.k, 1],
[1, self.strides[0], self.strides[1], 1], [1, 1, 1, 1],
padding=self.PADDING)
# Special hack for randomising the subsequent input connections: tensorflow does not support advanced matrix indexing
x0_shuf_patches=tf.transpose(x0_patches, perm=[3, 0, 1, 2])
x0_shuf_patches_0 = tf.gather_nd(x0_shuf_patches, tf.cast(self.rand_map_exp_0, tf.int32))
x0_shuf_patches_0=tf.transpose(x0_shuf_patches_0, perm=[1, 2, 3, 0])
x0_shuf_patches_1 = tf.gather_nd(x0_shuf_patches, tf.cast(self.rand_map_exp_1, tf.int32))
x0_shuf_patches_1=tf.transpose(x0_shuf_patches_1, perm=[1, 2, 3, 0])
x1_shuf_patches=tf.transpose(x1_patches, perm=[3, 0, 1, 2])
x1_shuf_patches_0 = tf.gather_nd(x1_shuf_patches, tf.cast(self.rand_map_exp_0, tf.int32))
x1_shuf_patches_0=tf.transpose(x1_shuf_patches_0, perm=[1, 2, 3, 0])
x1_shuf_patches_1 = tf.gather_nd(x1_shuf_patches, tf.cast(self.rand_map_exp_1, tf.int32))
x1_shuf_patches_1=tf.transpose(x1_shuf_patches_1, perm=[1, 2, 3, 0])
x0_pos=(1+binarize(x0_patches))/2*abs(x0_patches)
x0_neg=(1-binarize(x0_patches))/2*abs(x0_patches)
x1_pos=(1+binarize(x1_patches))/2*abs(x1_patches)
x1_neg=(1-binarize(x1_patches))/2*abs(x1_patches)
x0s0_pos=(1+binarize(x0_shuf_patches_0))/2#*abs(x0_shuf_patches_0)
x0s0_neg=(1-binarize(x0_shuf_patches_0))/2#*abs(x0_shuf_patches_0)
x1s0_pos=(1+binarize(x1_shuf_patches_0))/2#*abs(x1_shuf_patches_0)
x1s0_neg=(1-binarize(x1_shuf_patches_0))/2#*abs(x1_shuf_patches_0)
x0s1_pos=(1+binarize(x0_shuf_patches_1))/2#*abs(x0_shuf_patches_0)
x0s1_neg=(1-binarize(x0_shuf_patches_1))/2#*abs(x0_shuf_patches_0)
x1s1_pos=(1+binarize(x1_shuf_patches_1))/2#*abs(x1_shuf_patches_0)
x1s1_neg=(1-binarize(x1_shuf_patches_1))/2#*abs(x1_shuf_patches_0)
ws0_pos=(1+binarize(self.clamped_w1))/2
ws0_neg=(1-binarize(self.clamped_w1))/2
ws1_pos=(1+binarize(self.clamped_w2))/2
ws1_neg=(1-binarize(self.clamped_w2))/2
ws2_pos=(1+binarize(self.clamped_w3))/2
ws2_neg=(1-binarize(self.clamped_w3))/2
self.out= K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c1 *ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c2 *ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c3 *ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c4 *ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c5 *ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c6 *ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c7 *ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c8 *ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c9 *ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c10*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c11*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c12*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c13*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c14*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c15*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c16*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c17*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c18*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c19*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c20*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c21*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c22*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c23*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c24*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c25*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c26*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c27*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c28*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c29*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c30*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c31*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_pos*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c32*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c33*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c34*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c35*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c36*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c37*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c38*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c39*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_pos, tf.reshape(self.clamped_c40*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c41*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c42*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c43*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c44*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c45*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c46*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c47*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_pos*x0s1_neg, tf.reshape(self.clamped_c48*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c49*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c50*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c51*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c52*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c53*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c54*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c55*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_pos, tf.reshape(self.clamped_c56*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c57*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c58*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c59*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c60*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c61*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c62*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c63*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x0_neg*x0s0_neg*x0s1_neg, tf.reshape(self.clamped_c64*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c1 *ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c2 *ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c3 *ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c4 *ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c5 *ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c6 *ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c7 *ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c8 *ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c9 *ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c10*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c11*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c12*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c13*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c14*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c15*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c16*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c17*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c18*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c19*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c20*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c21*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c22*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c23*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c24*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c25*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c26*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c27*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c28*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c29*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c30*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c31*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_pos*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c32*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c33*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c34*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c35*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c36*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c37*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c38*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c39*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_pos, tf.reshape(self.clamped_c40*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c41*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c42*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c43*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c44*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c45*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c46*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c47*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_pos*x1s1_neg, tf.reshape(self.clamped_c48*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c49*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c50*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c51*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c52*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c53*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c54*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c55*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_pos, tf.reshape(self.clamped_c56*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c57*ws0_pos*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c58*ws0_pos*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c59*ws0_pos*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c60*ws0_pos*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c61*ws0_neg*ws1_pos*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c62*ws0_neg*ws1_pos*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c63*ws0_neg*ws1_neg*ws2_pos*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
self.out=self.out+K.dot(x1_neg*x1s0_neg*x1s1_neg, tf.reshape(self.clamped_c64*ws0_neg*ws1_neg*ws2_neg*tf.tile(tf.reshape(self.pruning_mask,self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), [-1, self.nfilters]))
#self.out=K.conv2d(x_pos[0,:,:,:,:]*xs_pos[0,:,:,:,:], kernel=self.clamped_w1, padding=self.padding,strides=self.strides )
#self.out=self.out+K.conv2d(x_pos[0,:,:,:,:]*xs_neg[0,:,:,:,:], kernel=self.clamped_w2, padding=self.padding,strides=self.strides )
#self.out=self.out+K.conv2d(x_neg[0,:,:,:,:]*xs_pos[0,:,:,:,:], kernel=self.clamped_w3, padding=self.padding,strides=self.strides )
#self.out=self.out+K.conv2d(x_neg[0,:,:,:,:]*xs_neg[0,:,:,:,:], kernel=self.clamped_w4, padding=self.padding,strides=self.strides )
#self.out=self.out+K.conv2d(x_pos[1,:,:,:,:]*xs_pos[1,:,:,:,:], kernel=self.clamped_w5, padding=self.padding,strides=self.strides )
#self.out=self.out+K.conv2d(x_pos[1,:,:,:,:]*xs_neg[1,:,:,:,:], kernel=self.clamped_w6, padding=self.padding,strides=self.strides )
#self.out=self.out+K.conv2d(x_neg[1,:,:,:,:]*xs_pos[1,:,:,:,:], kernel=self.clamped_w7, padding=self.padding,strides=self.strides )
#self.out=self.out+K.conv2d(x_neg[1,:,:,:,:]*xs_neg[1,:,:,:,:], kernel=self.clamped_w8, padding=self.padding,strides=self.strides )
else:
x_expanded=0
for l in range(self.levels):
x_in=x[l,:,:,:,:]
x_expanded=x_expanded+x_in
self.out=K.conv2d(x_expanded, kernel=self.clamped_w*tf.tile(tf.reshape(self.pruning_mask, self.tile_size),[self.TRC,self.TRC,self.TM,self.TN]), padding=self.padding,strides=self.strides )
elif self.levels==3:
if self.LUT==True:
x_pos=(1+x)/2
x_neg=(1-x)/2
self.out=K.conv2d(x_pos[0,:,:,:,:]*x_pos[1,:,:,:,:]*x_pos[2,:,:,:,:], kernel=self.clamped_w1, padding=self.padding,strides=self.strides )
self.out=self.out+K.conv2d(x_pos[0,:,:,:,:]*x_pos[1,:,:,:,:]*x_neg[2,:,:,:,:], kernel=self.clamped_w2, padding=self.padding,strides=self.strides )
self.out=self.out+K.conv2d(x_pos[0,:,:,:,:]*x_neg[1,:,:,:,:]*x_pos[2,:,:,:,:], kernel=self.clamped_w3, padding=self.padding,strides=self.strides )
self.out=self.out+K.conv2d(x_pos[0,:,:,:,:]*x_neg[1,:,:,:,:]*x_neg[2,:,:,:,:], kernel=self.clamped_w4, padding=self.padding,strides=self.strides )
self.out=self.out+K.conv2d(x_neg[0,:,:,:,:]*x_pos[1,:,:,:,:]*x_pos[2,:,:,:,:], kernel=self.clamped_w5, padding=self.padding,strides=self.strides )
self.out=self.out+K.conv2d(x_neg[0,:,:,:,:]*x_pos[1,:,:,:,:]*x_neg[2,:,:,:,:], kernel=self.clamped_w6, padding=self.padding,strides=self.strides )
self.out=self.out+K.conv2d(x_neg[0,:,:,:,:]*x_neg[1,:,:,:,:]*x_pos[2,:,:,:,:], kernel=self.clamped_w7, padding=self.padding,strides=self.strides )
self.out=self.out+K.conv2d(x_neg[0,:,:,:,:]*x_neg[1,:,:,:,:]*x_neg[2,:,:,:,:], kernel=self.clamped_w8, padding=self.padding,strides=self.strides )
else:
x_expanded=0
for l in range(self.levels):
x_in=x[l,:,:,:,:]
x_expanded=x_expanded+x_in
self.out=K.conv2d(x_expanded, kernel=self.clamped_w, padding=self.padding,strides=self.strides )
if keras.__version__[0]=='1':
if self.levels==1:
self.out=K.conv2d(x, kernel=self.clamped_w, padding=self.padding,strides=self.strides )
else:
for l in range(self.levels):
x_expanded=x_expanded+x[l,:,:,:,:]
self.out=K.conv2d(x_expanded, kernel=self.clamped_w, padding=self.padding,strides=self.strides )
# if keras.__version__[0]=='2':#train baseline with no resid gamma scaling
# if self.levels==1:
# self.out=K.conv2d(x, kernel=self.clamped_w, padding=self.padding,strides=self.strides )
# elif self.levels==2:
# x_expanded=0
# for l in range(self.levels):
# x_in=x[l,:,:,:,:]
# x_expanded=x_expanded+x_in
# self.out=K.conv2d(x_expanded, kernel=self.clamped_w, padding=self.padding,strides=self.strides )
# if keras.__version__[0]=='1':
# if self.levels==1:
# self.out=K.conv2d(x, kernel=self.clamped_w, padding=self.padding,strides=self.strides )
# else:
# for l in range(self.levels):
# x_expanded=x_expanded+x[l,:,:,:,:]
# self.out=K.conv2d(x_expanded, kernel=self.clamped_w, padding=self.padding,strides=self.strides )
self.output_dim=self.out.get_shape()
return self.out
def get_output_shape_for(self,input_shape):
return (input_shape[0], self.output_dim[1],self.output_dim[2],self.output_dim[3])
def compute_output_shape(self,input_shape):
return (input_shape[0], self.output_dim[1],self.output_dim[2],self.output_dim[3])
class binary_dense(Layer):
def __init__(self,n_in,n_out,levels=1,pruning_prob=0,first_layer=False,LUT=True,BINARY=True,TM=1,TN=1,**kwargs):
self.n_in=n_in
self.n_out=n_out
self.levels=levels
self.LUT=LUT
self.BINARY=BINARY
self.first_layer=first_layer
self.TM = TM
self.TN = TN
self.tile_size = [n_in/TM, n_out/TN]
super(binary_dense,self).__init__(**kwargs)
def build(self, input_shape):
self.rand_map_0 = self.add_weight(name='rand_map_0',
shape=(self.tile_size[0], 1),
initializer=keras.initializers.Constant(value=np.random.randint(self.tile_size[0], size=[self.tile_size[0], 1])),
trainable=False) # Randomisation map for subsequent input connections
self.rand_map_1 = self.add_weight(name='rand_map_1',
shape=(self.tile_size[0], 1),
initializer=keras.initializers.Constant(value=np.random.randint(self.tile_size[0], size=[self.tile_size[0], 1])),
trainable=False) # Randomisation map for subsequent input connections
self.rand_map_exp_0 = self.add_weight(name='rand_map_exp_0',
shape=(self.n_in, 1),
initializer=keras.initializers.Constant(value=np.random.randint(self.n_in, size=[self.n_in, 1])),
trainable=False) # Randomisation map for subsequent input connections
self.rand_map_exp_1 = self.add_weight(name='rand_map_exp_1',
shape=(self.n_in, 1),
initializer=keras.initializers.Constant(value=np.random.randint(self.n_in, size=[self.n_in, 1])),
trainable=False) # Randomisation map for subsequent input connections
stdv=1/np.sqrt(self.n_in)
self.gamma=K.variable(1.0)
if self.levels==1 or self.first_layer==True:
w = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
self.w=K.variable(w)
self.trainable_weights=[self.w,self.gamma]
elif self.levels==2:
if self.LUT==True:
w1 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w2 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w3 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
c1 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c2 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c3 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c4 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c5 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c6 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c7 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c8 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c9 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c10 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c11 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c12 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c13 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c14 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c15 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c16 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c17 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c18 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c19 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c20 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c21 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c22 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c23 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c24 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c25 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c26 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c27 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c28 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c29 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c30 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c31 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c32 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c33 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c34 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c35 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c36 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c37 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c38 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c39 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c40 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c41 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c42 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c43 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c44 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c45 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c46 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c47 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c48 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c49 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c50 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c51 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c52 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c53 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c54 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c55 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c56 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c57 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c58 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c59 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c60 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c61 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c62 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c63 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
c64 = np.random.normal(loc=0.0, scale=stdv,size=self.tile_size).astype(np.float32)
# self.w1 = self.add_weight(name='w1',
# shape=(self.n_in,self.n_out),
# initializer=keras.initializers.Constant(value=np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)),
# trainable=False)
# self.w2 = self.add_weight(name='w2',
# shape=(self.n_in,self.n_out),
# initializer=keras.initializers.Constant(value=np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)),
# trainable=False)
# self.w3 = self.add_weight(name='w3',
# shape=(self.n_in,self.n_out),
# initializer=keras.initializers.Constant(value=np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)),
# trainable=False)
self.c1 =K.variable(c1)
self.c2 =K.variable(c2)
self.c3 =K.variable(c3)
self.c4 =K.variable(c4)
self.c5 =K.variable(c5)
self.c6 =K.variable(c6)
self.c7 =K.variable(c7)
self.c8 =K.variable(c8)
self.c9 =K.variable(c9)
self.c10=K.variable(c10)
self.c11=K.variable(c11)
self.c12=K.variable(c12)
self.c13=K.variable(c13)
self.c14=K.variable(c14)
self.c15=K.variable(c15)
self.c16=K.variable(c16)
self.c17=K.variable(c17)
self.c18=K.variable(c18)
self.c19=K.variable(c19)
self.c20=K.variable(c20)
self.c21=K.variable(c21)
self.c22=K.variable(c22)
self.c23=K.variable(c23)
self.c24=K.variable(c24)
self.c25=K.variable(c25)
self.c26=K.variable(c26)
self.c27=K.variable(c27)
self.c28=K.variable(c28)
self.c29=K.variable(c29)
self.c30=K.variable(c30)
self.c31=K.variable(c31)
self.c32=K.variable(c32)
self.c33=K.variable(c33)
self.c34=K.variable(c34)
self.c35=K.variable(c35)
self.c36=K.variable(c36)
self.c37=K.variable(c37)
self.c38=K.variable(c38)
self.c39=K.variable(c39)
self.c40=K.variable(c40)
self.c41=K.variable(c41)
self.c42=K.variable(c42)
self.c43=K.variable(c43)
self.c44=K.variable(c44)
self.c45=K.variable(c45)
self.c46=K.variable(c46)
self.c47=K.variable(c47)
self.c48=K.variable(c48)
self.c49=K.variable(c49)
self.c50=K.variable(c50)
self.c51=K.variable(c51)
self.c52=K.variable(c52)
self.c53=K.variable(c53)
self.c54=K.variable(c54)
self.c55=K.variable(c55)
self.c56=K.variable(c56)
self.c57=K.variable(c57)
self.c58=K.variable(c58)
self.c59=K.variable(c59)
self.c60=K.variable(c60)
self.c61=K.variable(c61)
self.c62=K.variable(c62)
self.c63=K.variable(c63)
self.c64=K.variable(c64)
self.w1 =K.variable(w1)
self.w2 =K.variable(w2)
self.w3 =K.variable(w3)
self.trainable_weights=[self.c1,self.c2,self.c3,self.c4,self.c5,self.c6,self.c7,self.c8,self.c9,self.c10,self.c11,self.c12,self.c13,self.c14,self.c15,self.c16,
self.c17,self.c18,self.c19,self.c20,self.c21,self.c22,self.c23,self.c24,self.c25,self.c26,self.c27,self.c28,self.c29,self.c30,self.c31,self.c32,
self.c33,self.c34,self.c35,self.c36,self.c37,self.c38,self.c39,self.c40,self.c41,self.c42,self.c43,self.c44,self.c45,self.c46,self.c47,self.c48,
self.c49,self.c50,self.c51,self.c52,self.c53,self.c54,self.c55,self.c56,self.c57,self.c58,self.c59,self.c60,self.c61,self.c62,self.c63,self.c64,
self.w1,self.w2,self.w3,self.gamma]
else:
w = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
self.w=K.variable(w)
self.trainable_weights=[self.w,self.gamma]
elif self.levels==3:
if self.LUT==True:
w1 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w2 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w3 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w4 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w5 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w6 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w7 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
w8 = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
self.w1=K.variable(w1)
self.w2=K.variable(w2)
self.w3=K.variable(w3)
self.w4=K.variable(w4)
self.w5=K.variable(w5)
self.w6=K.variable(w6)
self.w7=K.variable(w7)
self.w8=K.variable(w8)
self.trainable_weights=[self.w1,self.w2,self.w3,self.w4,self.w5,self.w6,self.w7,self.w8,self.gamma]
else:
w = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
self.w=K.variable(w)
self.trainable_weights=[self.w,self.gamma]
self.pruning_mask = self.add_weight(name='pruning_mask',
shape=self.tile_size,
initializer=keras.initializers.Constant(value=np.ones(self.tile_size)),
trainable=False) # LUT pruning based on whether inputs get repeated
# elif self.levels==2:#train baseline without resid gamma scaling
# w = np.random.normal(loc=0.0, scale=stdv,size=[self.n_in,self.n_out]).astype(np.float32)
# self.w=K.variable(w)
# self.trainable_weights=[self.w,self.gamma]
def call(self, x, mask=None):
    """Forward pass of the tiled, optionally LUT-expanded binarised dense layer.

    In the multi-level branches x is indexed as x[level, batch, features]
    (residual binarisation stacks levels on axis 0 — assumption based on the
    indexing pattern; TODO confirm against the producing Residual_sign layer).
    The result is returned and also cached on self.out; clamped/binarised
    weight views are cached on self.clamped_* attributes, as in the original.
    """
    # Learned output scale, kept non-negative.
    constraint_gamma = K.abs(self.gamma)
    if self.levels == 1 or self.first_layer == True:
        # Plain (non-residual) case: a single clipped or binarised weight matrix.
        if self.BINARY == False:
            self.clamped_w = constraint_gamma * K.clip(self.w, -1, 1)
        else:
            self.clamped_w = constraint_gamma * binarize(self.w)
        self.out = K.dot(x, self.clamped_w)
    elif self.levels == 2:
        if self.LUT == True:
            reps = [self.TM, self.TN]
            # Clamp/binarise the three selector weights (no gamma) and the 64
            # LUT entries (with gamma); c tensors tile across the TM x TN grid.
            if self.BINARY == False:
                for i in range(1, 4):
                    setattr(self, 'clamped_w%d' % i, K.clip(getattr(self, 'w%d' % i), -1, 1))
                for i in range(1, 65):
                    setattr(self, 'clamped_c%d' % i,
                            constraint_gamma * K.clip(tf.tile(getattr(self, 'c%d' % i), reps), -1, 1))
            else:
                for i in range(1, 4):
                    setattr(self, 'clamped_w%d' % i, binarize(getattr(self, 'w%d' % i)))
                for i in range(1, 65):
                    setattr(self, 'clamped_c%d' % i,
                            constraint_gamma * binarize(tf.tile(getattr(self, 'c%d' % i), reps)))
            # Randomised input connections: TF1 lacks advanced indexing, so move
            # features to axis 0, gather along it, and move them back.
            shuf = tf.transpose(x, perm=[2, 0, 1])
            shuf_0 = tf.transpose(tf.gather_nd(shuf, tf.cast(self.rand_map_exp_0, tf.int32)), perm=[1, 2, 0])
            shuf_1 = tf.transpose(tf.gather_nd(shuf, tf.cast(self.rand_map_exp_1, tf.int32)), perm=[1, 2, 0])
            # +/- indicator pairs; index 0 is the "+1 side", 1 the "-1 side".
            # Only the primary input keeps its magnitude (abs(x)).
            x_half = ((1 + binarize(x)) / 2 * abs(x), (1 - binarize(x)) / 2 * abs(x))
            s0_half = ((1 + binarize(shuf_0)) / 2, (1 - binarize(shuf_0)) / 2)
            s1_half = ((1 + binarize(shuf_1)) / 2, (1 - binarize(shuf_1)) / 2)
            w_half = []
            for i in range(1, 4):
                wb = binarize(getattr(self, 'clamped_w%d' % i))
                w_half.append(((1 + wb) / 2, (1 - wb) / 2))
            mask_tiled = tf.tile(self.pruning_mask, reps)
            # Enumerate all 64 sign combinations of (x, shuf_0, shuf_1, w1, w2, w3);
            # LUT entry c{k+1} corresponds to combination index k read in binary
            # order (pos=0, neg=1), summed over both residual levels — this
            # reproduces the original 128-term accumulation in the same order.
            acc = None
            for lvl in range(2):
                for k in range(64):
                    bx, b0, b1, bw0, bw1, bw2 = ((k >> 5) & 1, (k >> 4) & 1, (k >> 3) & 1,
                                                 (k >> 2) & 1, (k >> 1) & 1, k & 1)
                    act = x_half[bx][lvl, :, :] * s0_half[b0][lvl, :, :] * s1_half[b1][lvl, :, :]
                    ker = (getattr(self, 'clamped_c%d' % (k + 1))
                           * w_half[0][bw0] * w_half[1][bw1] * w_half[2][bw2] * mask_tiled)
                    term = K.dot(act, ker)
                    acc = term if acc is None else acc + term
            self.out = acc
        else:
            # Baseline tiled binary dense: sum the residual levels, then a single
            # matmul through the tiled pruning mask.
            if self.BINARY == False:
                self.clamped_w = constraint_gamma * K.clip(self.w, -1, 1)
            else:
                self.clamped_w = constraint_gamma * binarize(self.w)
            x_sum = 0
            for lvl in range(self.levels):
                x_sum = x_sum + x[lvl, :, :]
            self.out = K.dot(x_sum, self.clamped_w * tf.tile(self.pruning_mask, [self.TM, self.TN]))
    elif self.levels == 3:
        if self.LUT == True:
            # Eight weight matrices, one per sign combination of the three levels.
            for i in range(1, 9):
                setattr(self, 'clamped_w%d' % i, constraint_gamma * binarize(getattr(self, 'w%d' % i)))
            # Index 0 = positive half-indicator, 1 = negative half-indicator.
            halves = ((1 + x) / 2, (1 - x) / 2)
            # w{k+1} corresponds to combination index k in binary order.
            acc = None
            for k in range(8):
                b0, b1, b2 = (k >> 2) & 1, (k >> 1) & 1, k & 1
                act = halves[b0][0, :, :] * halves[b1][1, :, :] * halves[b2][2, :, :]
                term = K.dot(act, getattr(self, 'clamped_w%d' % (k + 1)))
                acc = term if acc is None else acc + term
            self.out = acc
        else:
            # Baseline: sum residual levels and apply one binarised matrix
            # (note: no pruning mask in this branch, matching the original).
            self.clamped_w = constraint_gamma * binarize(self.w)
            x_sum = 0
            for lvl in range(self.levels):
                x_sum = x_sum + x[lvl, :, :]
            self.out = K.dot(x_sum, self.clamped_w)
    return self.out
def get_output_shape_for(self, input_shape):
    """Keras-1.x style shape inference: output is (batch, n_out)."""
    batch_size = input_shape[0]
    return (batch_size, self.n_out)
def compute_output_shape(self, input_shape):
    """Keras-2.x style shape inference: output is (batch, n_out)."""
    batch_size = input_shape[0]
    return (batch_size, self.n_out)
"""
def binarize(x):
#Clip and binarize tensor using the straight through estimator (STE) for the gradient.
g = tf.get_default_graph()
with ops.name_scope("Binarized") as name:
with g.gradient_override_map({"Sign": "Identity"}):
x=tf.clip_by_value(x,-1,1)
return tf.sign(x)
class Residual_sign(Layer):
def __init__(self, levels=1,**kwargs):
self.levels=levels
super(Residual_sign, self).__init__(**kwargs)
def build(self, input_shape):
ars=np.arange(self.levels)+1.0
ars=ars[::-1]
self.means=ars/np.sum(ars)
self.means=tf.Variable(self.means,dtype=tf.float32)
K.get_session().run(tf.variables_initializer([self.means]))
self.trainable_weights=[self.means]
def call(self, x,mask=None):
resid = x
out_bin=0
for l in range(self.levels):
out=binarize(resid)*K.abs(self.means[l])
out_bin=out_bin+out
resid=resid-out
return out_bin
def compute_output_shape(self,input_shape):
return input_shape
def set_means(self,X):
means=np.zeros((self.levels))
means[0]=1
resid=np.clip(X,-1,1)
approx=0
for l in range(self.levels):
m=np.mean(np.absolute(resid))
out=np.sign(resid)*m
approx=approx+out
resid=resid-out
means[l]=m
err=np.mean((approx-np.clip(X,-1,1))**2)
means=means/np.sum(means)
sess=K.get_session()
sess.run(self.means.assign(means))
class binary_conv(Layer):
def __init__(self,nfilters,ch_in,k,padding,**kwargs):
self.nfilters=nfilters
self.ch_in=ch_in
self.k=k
self.padding=padding
super(binary_conv,self).__init__(**kwargs)
def build(self, input_shape):
stdv=1/np.sqrt(self.k*self.k*self.ch_in)
w = tf.random_normal(shape=[self.k,self.k,self.ch_in,self.nfilters], mean=0.0, stddev=stdv, dtype=tf.float32)
self.w=K.variable(w)
self.gamma=K.variable([1.0])
self.trainable_weights=[self.w,self.gamma]
def call(self, x,mask=None):
constraint_gamma=K.abs(self.gamma)
self.clamped_w=constraint_gamma*binarize(self.w)
self.out=K.conv2d(x, kernel=self.clamped_w, padding=self.padding)#tf.nn.convolution(x, filter=self.clamped_w , padding=self.padding)
self.output_dim=self.out.get_shape()
#self.out=Convolution2D(filters=32, kernel_size=(3,3), strides=(1, 1), padding='valid', use_bias=False)(x)
return self.out
def compute_output_shape(self,input_shape):
return (input_shape[0], self.output_dim[1],self.output_dim[2],self.output_dim[3])
class binary_dense(Layer):
def __init__(self,n_in,n_out,**kwargs):
self.n_in=n_in
self.n_out=n_out
super(binary_dense,self).__init__(**kwargs)
def build(self, input_shape):
stdv=1/np.sqrt(self.n_in)
w = tf.random_normal(shape=[self.n_in,self.n_out], mean=0.0, stddev=stdv, dtype=tf.float32)
self.w=K.variable(w)
self.gamma=K.variable([1.0])
self.trainable_weights=[self.w,self.gamma]
def call(self, x, mask=None):
constraint_gamma=K.abs(self.gamma)
self.clamped_w=constraint_gamma*binarize(self.w)
self.out=K.dot(x, self.clamped_w)
self.output_dim=self.out.get_shape()
return self.out
def compute_output_shape(self,input_shape):
return (input_shape[0], self.output_dim[1])
"""
class my_flat(Layer):
    """Flatten layer: collapses every non-batch dimension into one.

    Equivalent to Keras Flatten; input (batch, d1, ..., dn) becomes
    (batch, d1*...*dn).
    """
    def __init__(self, **kwargs):
        super(my_flat, self).__init__(**kwargs)
    def build(self, input_shape):
        # Stateless layer: nothing to create.
        return
    def call(self, x, mask=None):
        # Static shape is known except for the batch dimension (-1).
        flat_dim = np.prod(x.get_shape().as_list()[1:])
        self.out = tf.reshape(x, [-1, flat_dim])
        return self.out
    def compute_output_shape(self, input_shape):
        flat_dim = int(np.prod(input_shape[1:]))
        return (input_shape[0], flat_dim)
| 80.731413
| 219
| 0.719319
| 22,662
| 120,532
| 3.662563
| 0.015135
| 0.080842
| 0.06865
| 0.074457
| 0.973133
| 0.969121
| 0.963073
| 0.959723
| 0.954892
| 0.948013
| 0
| 0.056814
| 0.078842
| 120,532
| 1,492
| 220
| 80.785523
| 0.690747
| 0.055745
| 0
| 0.446721
| 0
| 0
| 0.001313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.014754
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
879385ea0129e886b65eb51485ba81830a0ebdb0
| 4,070
|
py
|
Python
|
applications/monitoring/controllers/despesa.py
|
BetinRibeiro/web2py_crediario
|
d7b0aef4579870922c6d87b4b0322b427b2bef98
|
[
"BSD-3-Clause"
] | 2
|
2019-10-18T23:04:22.000Z
|
2019-10-24T04:03:10.000Z
|
applications/monitoring/controllers/despesa.py
|
BetinRibeiro/web2py_crediario
|
d7b0aef4579870922c6d87b4b0322b427b2bef98
|
[
"BSD-3-Clause"
] | null | null | null |
applications/monitoring/controllers/despesa.py
|
BetinRibeiro/web2py_crediario
|
d7b0aef4579870922c6d87b4b0322b427b2bef98
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# try something like
def listar_desp_local():
    """List the 'Local' expenses of the project given by request.args(0).

    Returns locals() so the view can access ``proj`` (the projeto row)
    and ``rows`` (the matching despesa rows).
    """
    proj = db.projeto(request.args(0, cast=int))
    rows = db((db.despesa.projeto == request.args(0, cast=int))& (db.despesa.tipo_desp == "Local") ).select()
    return locals()
def listar_desp_venda():
    """List the 'Venda' (sale) expenses of the project given by request.args(0).

    Returns locals() so the view can access ``proj`` and ``rows``.
    """
    proj = db.projeto(request.args(0, cast=int))
    rows = db((db.despesa.projeto == request.args(0, cast=int)) & (db.despesa.tipo_desp == "Venda") ).select()
    return locals()
def listar_desp_cobranca():
    """List the 'Cobranca' (collection) expenses of the project in request.args(0).

    Returns locals() so the view can access ``proj`` and ``rows``.
    """
    proj = db.projeto(request.args(0, cast=int))
    rows = db((db.despesa.projeto == request.args(0, cast=int))& (db.despesa.tipo_desp == "Cobranca") ).select()
    return locals()
def inserir_desp_local():
    """Create a new 'Local' expense for the project given by request.args(0).

    ``projeto`` and ``tipo_desp`` are fixed (defaulted and non-writable) so
    the user only fills in the remaining expense fields.  On success the
    request is redirected to the project's 'Local' expense listing.
    Returns locals() so the view can access ``proj``, ``merc`` and ``form``.
    """
    proj = db.projeto(request.args(0, cast=int))
    db.despesa.projeto.default = proj.id
    db.despesa.projeto.readable = False
    db.despesa.projeto.writable = False
    db.despesa.tipo_desp.default = "Local"
    db.despesa.tipo_desp.readable = True
    db.despesa.tipo_desp.writable = False
    merc = db(db.despesa.projeto==proj.id).select()
    form = SQLFORM(db.despesa).process()
    if form.accepted:
        # FIX: use session.flash, not response.flash -- response.flash is
        # discarded by the redirect below (matches the alterar_* actions).
        session.flash = 'Formulario aceito'
        redirect(URL('listar_desp_local', args=proj.id))
    elif form.errors:
        response.flash = 'Formulario não aceito'
    else:
        response.flash = 'Preencha o formulario'
    return locals()
def alterar_desp_local():
    """Edit an existing 'Local' expense (despesa id in request.args(0)).

    Hides the ``projeto`` field (fixed by the record being edited) and, on
    success, flashes and redirects back to the project's 'Local' listing.
    Returns locals() for the view.
    """
    merc = db.despesa(request.args(0, cast=int))
    proj = db.projeto(merc.projeto)
    # Project is determined by the record; do not let the user change it.
    db.despesa.projeto.readable = False
    db.despesa.projeto.writable = False
    form = SQLFORM(db.despesa, request.args(0, cast=int))
    if form.process().accepted:
        # session.flash survives the redirect (response.flash would not).
        session.flash = 'Despesa atualizada'
        redirect(URL('listar_desp_local', args=proj.id))
    elif form.errors:
        response.flash = 'Erros no formulário!'
    else:
        if not response.flash:
            response.flash = 'Preencha o formulário!'
    return locals()
def inserir_desp_venda():
    """Create a new 'Venda' (sale) expense for the project in request.args(0).

    ``projeto`` and ``tipo_desp`` are fixed (defaulted and non-writable).
    On success the request is redirected to the project's 'Venda' listing.
    Returns locals() so the view can access ``proj``, ``merc`` and ``form``.
    """
    proj = db.projeto(request.args(0, cast=int))
    db.despesa.projeto.default = proj.id
    db.despesa.projeto.readable = False
    db.despesa.projeto.writable = False
    db.despesa.tipo_desp.default = "Venda"
    db.despesa.tipo_desp.readable = True
    db.despesa.tipo_desp.writable = False
    merc = db(db.despesa.projeto==proj.id).select()
    form = SQLFORM(db.despesa).process()
    if form.accepted:
        # FIX: use session.flash, not response.flash -- response.flash is
        # discarded by the redirect below (matches the alterar_* actions).
        session.flash = 'Formulario aceito'
        redirect(URL('listar_desp_venda', args=proj.id))
    elif form.errors:
        response.flash = 'Formulario não aceito'
    else:
        response.flash = 'Preencha o formulario'
    return locals()
def alterar_desp_venda():
    """Edit an existing 'Venda' expense (despesa id in request.args(0)).

    Hides the ``projeto`` field and, on success, flashes and redirects back
    to the project's 'Venda' listing.  Returns locals() for the view.
    """
    merc = db.despesa(request.args(0, cast=int))
    proj = db.projeto(merc.projeto)
    # Project is determined by the record; do not let the user change it.
    db.despesa.projeto.readable = False
    db.despesa.projeto.writable = False
    form = SQLFORM(db.despesa, request.args(0, cast=int))
    if form.process().accepted:
        # session.flash survives the redirect (response.flash would not).
        session.flash = 'Despesa atualizada'
        redirect(URL('listar_desp_venda', args=proj.id))
    elif form.errors:
        response.flash = 'Erros no formulário!'
    else:
        if not response.flash:
            response.flash = 'Preencha o formulário!'
    return locals()
def inserir_desp_cobranca():
    """Create a new 'Cobranca' (collection) expense for the project in request.args(0).

    ``projeto`` and ``tipo_desp`` are fixed (defaulted and non-writable).
    On success the request is redirected to the project's 'Cobranca' listing.
    Returns locals() so the view can access ``proj``, ``merc`` and ``form``.
    """
    proj = db.projeto(request.args(0, cast=int))
    db.despesa.projeto.default = proj.id
    db.despesa.projeto.readable = False
    db.despesa.projeto.writable = False
    db.despesa.tipo_desp.default = "Cobranca"
    db.despesa.tipo_desp.readable = True
    db.despesa.tipo_desp.writable = False
    merc = db(db.despesa.projeto==proj.id).select()
    form = SQLFORM(db.despesa).process()
    if form.accepted:
        # FIX: use session.flash, not response.flash -- response.flash is
        # discarded by the redirect below (matches the alterar_* actions).
        session.flash = 'Formulario aceito'
        redirect(URL('listar_desp_cobranca', args=proj.id))
    elif form.errors:
        response.flash = 'Formulario não aceito'
    else:
        response.flash = 'Preencha o formulario'
    return locals()
def alterar_desp_cobranca():
    """Edit an existing 'Cobranca' expense (despesa id in request.args(0)).

    Hides the ``projeto`` field and, on success, flashes and redirects back
    to the project's 'Cobranca' listing.  Returns locals() for the view.
    """
    merc = db.despesa(request.args(0, cast=int))
    proj = db.projeto(merc.projeto)
    # Project is determined by the record; do not let the user change it.
    db.despesa.projeto.readable = False
    db.despesa.projeto.writable = False
    form = SQLFORM(db.despesa, request.args(0, cast=int))
    if form.process().accepted:
        # session.flash survives the redirect (response.flash would not).
        session.flash = 'Despesa atualizada'
        redirect(URL('listar_desp_cobranca', args=proj.id))
    elif form.errors:
        response.flash = 'Erros no formulário!'
    else:
        if not response.flash:
            response.flash = 'Preencha o formulário!'
    return locals()
| 30.601504
| 109
| 0.72801
| 582
| 4,070
| 5.0189
| 0.099656
| 0.129408
| 0.115029
| 0.082164
| 0.968504
| 0.960972
| 0.942485
| 0.942485
| 0.942485
| 0.942485
| 0
| 0.004497
| 0.125799
| 4,070
| 132
| 110
| 30.833333
| 0.81647
| 0.009091
| 0
| 0.864865
| 0
| 0
| 0.124318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87ba61c5b21e9b42dc2ac938251f33519824249d
| 120
|
py
|
Python
|
CodeHS/Unit 8/8.4/ladder.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
CodeHS/Unit 8/8.4/ladder.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
CodeHS/Unit 8/8.4/ladder.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
01110
01110
00000
01110
01110
00000
01110
01110
00000
01110
01110
00000
01110
01110
00000
01110
01110
00000
01110
01110
| 5.714286
| 5
| 0.833333
| 20
| 120
| 5
| 0.1
| 0.7
| 0.9
| 1.2
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0.166667
| 120
| 20
| 6
| 6
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
87d8d367d55ccfe9ab418f13dbd1bdbedeb4bb3a
| 21,846
|
py
|
Python
|
packages/pytiger2c/grammar/cache/parser.py
|
yasserglez/pytiger2c
|
35c44d14775bf69ed6689b708b98d6d1ca533ba0
|
[
"MIT"
] | 2
|
2015-11-16T11:50:24.000Z
|
2017-09-27T23:18:16.000Z
|
packages/pytiger2c/grammar/cache/parser.py
|
yasserglez/pytiger2c
|
35c44d14775bf69ed6689b708b98d6d1ca533ba0
|
[
"MIT"
] | null | null | null |
packages/pytiger2c/grammar/cache/parser.py
|
yasserglez/pytiger2c
|
35c44d14775bf69ed6689b708b98d6d1ca533ba0
|
[
"MIT"
] | null | null | null |
# /home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/cache/parser.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = '\x861\xe3#\xea9\x87\xa0r-\xc7A<i\x07\x06'
_lr_action_items = {'DO':([2,3,4,9,11,12,15,19,41,42,46,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,100,104,105,106,123,],[-3,-4,-5,-30,-2,-28,40,-8,-31,-22,-21,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-29,116,-6,-25,-27,]),'THEN':([2,3,4,9,11,12,19,26,41,42,46,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,100,105,106,123,],[-3,-4,-5,-30,-2,-28,-8,53,-31,-22,-21,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-29,-6,-25,-27,]),'LBRACKET':([4,9,41,77,83,],[18,24,-31,-33,-32,]),'WHILE':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,]),'COLON':([90,109,117,],[101,119,124,]),'INTLIT':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,]),'MINUS':([0,1,2,3,4,5,8,9,10,11,12,14,15,17,18,19,22,24,25,26,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,50,52,53,58,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,93,95,96,97,98,100,102,104,105,106,111,116,120,123,125,128,132,133,134,],[5,5,-3,-4,-5,5,5,-30,5,-2,-28,38,38,5,5,-8,38,5,5,38,5,5,5,5,5,5,5,5,5,5,5,5,5,-31,38,38,5,5,-21,38,38,5,5,38,38,38,-12,38,-11,38,38,-9,38,-10,38,38,-33,38,38,-7,5,-32,-23,5,38,5,38,5,38,5,-29,5,38,38,38,38,5,5,38,5,38,38,5,38,]),'DIVIDE':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,31,31,-8,31,31,-31,31,31,-21,31,31,31,31,31,-12,31,-11,31,31,31,31,31,31,31,-33,31,31,-7,-32,-23,31,31,31,-29,31,31,31,31,31,31,31,31,]),'STRLIT':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
]),'LE':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,29,29,-8,29,29,-31,29,29,-21,29,29,29,None,None,-12,None,-11,None,None,-9,None,-10,29,29,-33,29,29,-7,-32,-23,29,29,29,-29,29,29,29,29,29,29,29,29,]),'RPAREN':([2,3,4,8,9,11,12,19,21,22,25,41,42,46,51,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,79,80,83,84,86,97,99,100,105,106,107,108,123,126,127,],[-3,-4,-5,-34,-30,-2,-28,-8,46,-36,-43,-31,-22,-21,84,-45,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-35,-7,-32,-23,-24,-44,-57,-29,-6,-25,-58,117,-27,-59,-60,]),'SEMICOLON':([2,3,4,8,9,11,12,19,21,22,41,42,46,58,64,65,66,67,68,69,70,71,72,73,74,75,76,77,79,80,83,84,86,89,100,105,106,123,],[-3,-4,-5,-34,-30,-2,-28,-8,45,-36,-31,-22,-21,-34,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-35,-7,-32,-23,-24,45,-29,-6,-25,-27,]),'NE':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,32,32,-8,32,32,-31,32,32,-21,32,32,32,None,None,-12,None,-11,None,None,-9,None,-10,32,32,-33,32,32,-7,-32,-23,32,32,32,-29,32,32,32,32,32,32,32,32,]),'TO':([2,3,4,9,11,12,19,41,42,46,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,80,83,84,86,100,105,106,123,],[-3,-4,-5,-30,-2,-28,-8,-31,-22,-21,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,93,-7,-32,-23,-24,-29,-6,-25,-27,]),'LT':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,34,34,-8,34,34,-31,34,34,-21,34,34,34,None,None,-12,None,-11,None,None,-9,None,-10,34,34,-33,34,34,-7,-32,-23,34,34,34,-29,34,34,34,34,34,34,34,34,]),'PLUS':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-
2,-28,36,36,-8,36,36,-31,36,36,-21,36,36,36,36,36,-12,36,-11,36,36,-9,36,-10,36,36,-33,36,36,-7,-32,-23,36,36,36,-29,36,36,36,36,36,36,36,36,]),'COMMA':([2,3,4,9,11,12,19,23,25,41,42,46,47,48,51,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,94,95,97,99,100,105,106,107,108,112,121,123,126,127,],[-3,-4,-5,-30,-2,-28,-8,-39,-43,-31,-22,-21,-40,81,85,-45,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-41,-42,-44,-57,-29,-6,-25,-58,118,-57,118,-27,-59,-60,]),'ARRAY':([103,],[114,]),'ASSIGN':([4,9,20,41,77,83,90,110,],[17,-30,44,-31,-33,-32,102,120,]),'$end':([2,3,4,6,9,11,12,14,19,41,42,46,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,100,105,106,123,],[-3,-4,-5,0,-30,-2,-28,-1,-8,-31,-22,-21,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-29,-6,-25,-27,]),'FUNCTION':([2,3,4,9,11,12,13,19,27,41,42,46,54,56,57,60,61,62,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,88,91,100,105,106,111,113,115,123,128,129,130,132,134,],[-3,-4,-5,-30,-2,-28,-37,-8,55,-31,-22,-21,-47,-49,-46,-51,55,-38,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-52,-50,-29,-6,-25,-61,-53,-54,-27,-62,-55,-56,-63,-64,]),'GT':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,35,35,-8,35,35,-31,35,35,-21,35,35,35,None,None,-12,None,-11,None,None,-9,None,-10,35,35,-33,35,35,-7,-32,-23,35,35,35,-29,35,35,35,35,35,35,35,35,]),'END':([2,3,4,9,11,12,19,22,41,42,46,58,64,65,66,67,68,69,70,71,72,73,74,75,76,77,79,80,83,84,86,89,100,105,106,123,],[-3,-4,-5,-30,-2,-28,-8,-36,-31,-22,-21,-34,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-35,-7,-32,-23,-24,100,-29,-6,-25,-27,]),'RBRACE':([2,3,4,9,11,12,19,23,41,42,46,47,48,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,94,95,100,105,106,107,112,121,123,126,127,],[-3,-4,-5,-30,-2,-28,-8,-39,-31,-22,-21,-40,80,-19,-16,-18,-12,-14,-11,-15,
-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-41,-42,-29,-6,-25,-58,-57,129,-27,-59,-60,]),'FOR':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,]),'PERIOD':([4,9,41,77,83,],[16,-30,-31,-33,-32,]),'ELSE':([2,3,4,9,11,12,19,41,42,46,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,100,105,106,123,],[-3,-4,-5,-30,-2,-28,-8,-31,-22,-21,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,98,-29,-6,-25,-27,]),'GE':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,30,30,-8,30,30,-31,30,30,-21,30,30,30,None,None,-12,None,-11,None,None,-9,None,-10,30,30,-33,30,30,-7,-32,-23,30,30,30,-29,30,30,30,30,30,30,30,30,]),'LPAREN':([0,1,5,8,9,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,87,93,96,98,102,116,120,125,133,],[8,8,8,8,25,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,99,8,8,8,8,8,8,8,8,]),'IN':([2,3,4,9,11,12,13,19,27,41,42,46,54,56,57,60,61,62,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,88,91,100,105,106,111,113,115,123,128,129,130,132,134,],[-3,-4,-5,-30,-2,-28,-37,-8,58,-31,-22,-21,-47,-49,-46,-51,-48,-38,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-52,-50,-29,-6,-25,-61,-53,-54,-27,-62,-55,-56,-63,-64,]),'VAR':([2,3,4,9,11,12,13,19,27,41,42,46,54,56,57,60,61,62,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,88,91,100,105,106,111,113,115,123,128,129,130,132,134,],[-3,-4,-5,-30,-2,-28,-37,-8,59,-31,-22,-21,-47,-49,-46,-51,-48,-38,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-52,-50,-29,-6,-25,-61,-53,-54,-27,-62,-55,-56,-63,-64,]),'TIMES':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,
-2,-28,33,33,-8,33,33,-31,33,33,-21,33,33,33,33,33,-12,33,-11,33,33,33,33,33,33,33,-33,33,33,-7,-32,-23,33,33,33,-29,33,33,33,33,33,33,33,33,]),'EQ':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,49,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,92,95,97,100,104,105,106,111,117,123,128,131,132,134,],[-3,-4,-5,-30,-2,-28,37,37,-8,37,37,-31,37,37,-21,82,37,37,37,None,None,-12,None,-11,None,None,-9,None,-10,37,37,-33,37,37,-7,-32,-23,37,103,37,37,-29,37,37,37,37,125,37,37,133,37,37,]),'ID':([0,1,5,7,8,10,16,17,18,23,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,55,58,59,63,81,82,85,93,96,98,99,101,102,103,112,116,118,119,120,122,124,125,133,],[9,9,9,20,9,9,41,9,9,49,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,87,9,90,92,49,9,9,9,9,9,109,110,9,115,109,9,109,127,9,130,131,9,9,]),'IF':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,]),'AND':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,28,28,-8,28,28,-31,28,28,-21,28,28,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,28,28,-33,28,28,-7,-32,-23,28,28,28,-29,28,28,28,28,28,28,28,28,]),'LBRACE':([9,103,],[23,112,]),'NIL':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,]),'OF':([83,114,],[96,122,]),'BREAK':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,]),'LET':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[13,1
3,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,]),'RBRACKET':([2,3,4,9,11,12,19,41,42,43,46,50,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,100,105,106,123,],[-3,-4,-5,-30,-2,-28,-8,-31,-22,77,-21,83,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-29,-6,-25,-27,]),'TYPE':([2,3,4,9,11,12,13,19,27,41,42,46,54,56,57,60,61,62,64,65,66,67,68,69,70,71,72,73,74,75,76,77,80,83,84,86,88,91,100,105,106,111,113,115,123,128,129,130,132,134,],[-3,-4,-5,-30,-2,-28,-37,-8,63,-31,-22,-21,-47,-49,63,-51,-48,-38,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,-26,-33,-7,-32,-23,-24,-52,-50,-29,-6,-25,-61,-53,-54,-27,-62,-55,-56,-63,-64,]),'OR':([2,3,4,9,11,12,14,15,19,22,26,41,42,43,46,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,83,84,86,95,97,100,104,105,106,111,123,128,132,134,],[-3,-4,-5,-30,-2,-28,39,39,-8,39,39,-31,39,39,-21,39,39,-19,-16,-18,-12,-14,-11,-15,-17,-9,-13,-10,-20,39,-33,39,39,-7,-32,-23,39,39,39,-29,39,39,39,39,39,39,39,39,]),}
# Invert the generated ACTION items into the LALR ACTION table.
# _lr_action_items maps each terminal to a pair of parallel lists
# (state-numbers, actions); the loop rebuilds it as
# _lr_action[state][terminal] = action for O(1) lookup during parsing.
# (Auto-generated by PLY -- do not edit the table contents.)
_lr_action = { }
for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_action: _lr_action[_x] = { }
        _lr_action[_x][_k] = _y
del _lr_action_items  # free the intermediate representation
_lr_goto_items = {'func_dec_group':([27,],[61,]),'func_dec':([27,61,],[56,91,]),'expr_seq':([8,58,],[21,89,]),'field_type':([99,112,118,],[107,107,126,]),'expr_list':([25,],[51,]),'lvalue':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,]),'var_dec':([27,],[54,]),'field_assign':([23,81,],[47,94,]),'field_list':([23,],[48,]),'field_types':([99,112,],[108,121,]),'program':([0,],[6,]),'expr':([0,1,5,8,10,17,18,24,25,28,29,30,31,32,33,34,35,36,37,38,39,40,44,45,53,58,82,85,93,96,98,102,116,120,125,133,],[14,15,19,22,26,42,43,50,52,64,65,66,67,68,69,70,71,72,73,74,75,76,78,79,86,22,95,97,104,105,106,111,123,128,132,134,]),'type_dec':([27,57,],[60,88,]),'type_dec_group':([27,],[57,]),'dec':([27,],[62,]),'type':([103,],[113,]),'dec_group':([13,],[27,]),}
# Invert the generated GOTO items into the LALR GOTO table, in the same
# way as the ACTION table above: _lr_goto[state][nonterminal] = next-state.
# (Auto-generated by PLY -- do not edit the table contents.)
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = { }
        _lr_goto[_x][_k] = _y
del _lr_goto_items  # free the intermediate representation
_lr_productions = [
("S' -> program","S'",1,None,None,None),
('program -> expr','program',1,'p_program','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',58),
('expr -> NIL','expr',1,'p_expr_nil','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',68),
('expr -> INTLIT','expr',1,'p_expr_int','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',73),
('expr -> STRLIT','expr',1,'p_expr_str','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',78),
('expr -> lvalue','expr',1,'p_expr_lvalue','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',84),
('expr -> ID LBRACKET expr RBRACKET OF expr','expr',6,'p_expr_array','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',89),
('expr -> ID LBRACE field_list RBRACE','expr',4,'p_expr_record','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',97),
('expr -> MINUS expr','expr',2,'p_expr_unary_minus','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',103),
('expr -> expr PLUS expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',111),
('expr -> expr MINUS expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',112),
('expr -> expr TIMES expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',113),
('expr -> expr DIVIDE expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',114),
('expr -> expr EQ expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',115),
('expr -> expr NE expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',116),
('expr -> expr LT expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',117),
('expr -> expr LE expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',118),
('expr -> expr GT expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',119),
('expr -> expr GE expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',120),
('expr -> expr AND expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',121),
('expr -> expr OR expr','expr',3,'p_expr_bin_op','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',122),
('expr -> LPAREN expr_seq RPAREN','expr',3,'p_expr_expr_seq','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',154),
('expr -> lvalue ASSIGN expr','expr',3,'p_expr_assign','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',160),
('expr -> ID LPAREN expr_list RPAREN','expr',4,'p_expr_func','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',168),
('expr -> IF expr THEN expr','expr',4,'p_expr_if','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',176),
('expr -> IF expr THEN expr ELSE expr','expr',6,'p_expr_if_else','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',183),
('expr -> WHILE expr DO expr','expr',4,'p_expr_while','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',191),
('expr -> FOR ID ASSIGN expr TO expr DO expr','expr',8,'p_expr_for','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',198),
('expr -> BREAK','expr',1,'p_expr_break','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',206),
('expr -> LET dec_group IN expr_seq END','expr',5,'p_expr_let','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',212),
('lvalue -> ID','lvalue',1,'p_lvalue_id','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',221),
('lvalue -> lvalue PERIOD ID','lvalue',3,'p_lvalue_record','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',226),
('lvalue -> ID LBRACKET expr RBRACKET','lvalue',4,'p_lvalue_array','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',232),
('lvalue -> lvalue LBRACKET expr RBRACKET','lvalue',4,'p_lvalue_array_lvalue','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',240),
('expr_seq -> <empty>','expr_seq',0,'p_expr_seq_empty','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',248),
('expr_seq -> expr_seq SEMICOLON expr','expr_seq',3,'p_expr_seq_multiple','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',252),
('expr_seq -> expr','expr_seq',1,'p_expr_seq_single','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',258),
('dec_group -> <empty>','dec_group',0,'p_dec_group_empty','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',267),
('dec_group -> dec_group dec','dec_group',2,'p_dec_group_multiple','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',271),
('field_list -> <empty>','field_list',0,'p_field_list_empty','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',280),
('field_list -> field_assign','field_list',1,'p_field_list_single','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',284),
('field_list -> field_list COMMA field_assign','field_list',3,'p_field_list_multiple','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',288),
('field_assign -> ID EQ expr','field_assign',3,'p_field_assign','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',294),
('expr_list -> <empty>','expr_list',0,'p_expr_list_empty','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',299),
('expr_list -> expr_list COMMA expr','expr_list',3,'p_expr_list_multiple','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',303),
('expr_list -> expr','expr_list',1,'p_expr_list_single','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',308),
('dec -> type_dec_group','dec',1,'p_dec_type_dec_group','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',316),
('dec -> var_dec','dec',1,'p_dec_var','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',321),
('dec -> func_dec_group','dec',1,'p_dec_func_dec_group','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',328),
('func_dec_group -> func_dec','func_dec_group',1,'p_func_dec_group_single','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',333),
('func_dec_group -> func_dec_group func_dec','func_dec_group',2,'p_func_dec_group_multiple','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',341),
('type_dec_group -> type_dec','type_dec_group',1,'p_type_dec_group_single','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',348),
('type_dec_group -> type_dec_group type_dec','type_dec_group',2,'p_type_dec_group_multiple','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',356),
('type_dec -> TYPE ID EQ type','type_dec',4,'p_type_dec','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',363),
('type -> ID','type',1,'p_type_alias','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',370),
('type -> LBRACE field_types RBRACE','type',3,'p_type_record','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',375),
('type -> ARRAY OF ID','type',3,'p_type_array','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',380),
('field_types -> <empty>','field_types',0,'p_field_types_empty','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',385),
('field_types -> field_type','field_types',1,'p_field_types_single','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',389),
('field_types -> field_types COMMA field_type','field_types',3,'p_field_types_multiple','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',393),
('field_type -> ID COLON ID','field_type',3,'p_field_type','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',401),
('var_dec -> VAR ID ASSIGN expr','var_dec',4,'p_var_dec_without_type','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',406),
('var_dec -> VAR ID COLON ID ASSIGN expr','var_dec',6,'p_var_dec_with_type','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',412),
('func_dec -> FUNCTION ID LPAREN field_types RPAREN EQ expr','func_dec',7,'p_func_dec_without_return','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',419),
('func_dec -> FUNCTION ID LPAREN field_types RPAREN COLON ID EQ expr','func_dec',9,'p_func_dec_with_return','/home/yasserglez/Workspace/PyTiger2C/packages/pytiger2c/grammar/parser.py',425),
]
| 232.404255
| 11,043
| 0.663188
| 4,982
| 21,846
| 2.839422
| 0.057808
| 0.064329
| 0.105684
| 0.147038
| 0.751661
| 0.727555
| 0.7219
| 0.70147
| 0.673123
| 0.591475
| 0
| 0.323652
| 0.025954
| 21,846
| 93
| 11,044
| 234.903226
| 0.341134
| 0.005951
| 0
| 0.02381
| 1
| 0
| 0.375092
| 0.22757
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e2106fa87f9189df4efa2fdf09b641c6235a50af
| 20,457
|
py
|
Python
|
codegen/yeppp/library/core/arm.py
|
wdv4758h/Yeppp-
|
deeca59a88bc2b014be802fd575757f7c26c180e
|
[
"BSD-3-Clause"
] | 30
|
2015-09-18T00:52:22.000Z
|
2021-11-03T17:44:30.000Z
|
codegen/yeppp/library/core/arm.py
|
wdv4758h/Yeppp-
|
deeca59a88bc2b014be802fd575757f7c26c180e
|
[
"BSD-3-Clause"
] | 1
|
2017-02-09T04:53:29.000Z
|
2017-02-09T04:53:29.000Z
|
codegen/yeppp/library/core/arm.py
|
wdv4758h/Yeppp-
|
deeca59a88bc2b014be802fd575757f7c26c180e
|
[
"BSD-3-Clause"
] | 6
|
2017-02-09T03:05:32.000Z
|
2022-03-17T06:50:19.000Z
|
#
# Yeppp! library implementation
#
# This file is part of Yeppp! library and licensed under the New BSD license.
# See LICENSE.txt for the full text of the license.
#
__author__ = 'Marat'
from peachpy.arm import *
class SCALAR:
    """Emitters for scalar (one element per iteration) ARM code paths.

    Each method appends instructions to the currently active peachpy
    function; nothing is computed in Python itself.
    """
    @staticmethod
    def AddSubtractMultiply_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, input_type, output_type, operation):
        """Emit scalar code for z[i] = x[i] OP y[i], advancing all three pointers.

        operation is 'Add', 'Subtract' or 'Multiply'; the integer path below
        only dispatches 'Add' and 'Subtract'.
        """
        if output_type.is_integer():
            if output_type.get_size() != 8:
                # Element fits in a single general-purpose register.
                acc = GeneralPurposeRegister()
                LOAD.ELEMENT( acc, [xPointer], input_type, increment_pointer = True )
                temp = GeneralPurposeRegister()
                LOAD.ELEMENT( temp, [yPointer], input_type, increment_pointer = True )
                COMPUTE = { 'Add': ADD, 'Subtract': SUB }[operation]
                COMPUTE( acc, temp )
                STORE.ELEMENT( [zPointer], acc, output_type, increment_pointer = True )
            else:
                # 64-bit integers: operate on low/high 32-bit halves with carry
                # propagation (ADDS/ADC resp. SUBS/SBC).
                assert input_type.get_size() == 8
                acc_lo = GeneralPurposeRegister()
                LDR( acc_lo, [xPointer], 4 )
                acc_hi = GeneralPurposeRegister()
                LDR( acc_hi, [xPointer], 4 )
                temp_lo = GeneralPurposeRegister()
                LDR( temp_lo, [yPointer], 4 )
                temp_hi = GeneralPurposeRegister()
                LDR( temp_hi, [yPointer], 4 )
                if operation == "Add":
                    ADDS( acc_lo, temp_lo )
                    ADC( acc_hi, temp_hi )
                elif operation == "Subtract":
                    SUBS( acc_lo, temp_lo )
                    SBC( acc_hi, temp_hi )
                STR( acc_lo, [zPointer], 4 )
                STR( acc_hi, [zPointer], 4 )
        elif output_type.is_floating_point():
            # VFP path: pick S (4-byte) or D (8-byte) registers by output size.
            acc = { 4: SRegister(), 8: DRegister() }[output_type.get_size()]
            LOAD.ELEMENT( acc, [xPointer], input_type, increment_pointer = True )
            temp = { 4: SRegister(), 8: DRegister() }[output_type.get_size()]
            LOAD.ELEMENT( temp, [yPointer], input_type, increment_pointer = True )
            COMPUTE = { ('Add', 4): VADD.F32, ('Subtract', 4): VSUB.F32, ('Multiply', 4): VMUL.F32,
                        ('Add', 8): VADD.F64, ('Subtract', 8): VSUB.F64, ('Multiply', 8): VMUL.F64}[operation, output_type.get_size()]
            COMPUTE( acc, temp )
            STORE.ELEMENT( [zPointer], acc, output_type, increment_pointer = True )
    @staticmethod
    def MinMax_VXusVXus_VYus(xPointer, yPointer, zPointer, ctype, operation):
        """Emit scalar code for z[i] = min/max(x[i], y[i]) on integer types."""
        acc = GeneralPurposeRegister()
        LOAD.ELEMENT( acc, [xPointer], ctype, increment_pointer = True )
        temp = GeneralPurposeRegister()
        LOAD.ELEMENT( temp, [yPointer], ctype, increment_pointer = True )
        CMP( acc, temp )
        # Conditional moves: unsigned comparisons use HI/LO, signed use GT/LT.
        if operation == "Min":
            if ctype.is_unsigned_integer():
                MOVHI( acc, temp )
            else:
                MOVGT( acc, temp )
        elif operation == "Max":
            if ctype.is_unsigned_integer():
                MOVLO( acc, temp )
            else:
                MOVLT( acc, temp )
        STORE.ELEMENT( [zPointer], acc, ctype, increment_pointer = True )
    @staticmethod
    def AddSubtractMultiply_VXfVXf_VYf(xPointer, yPointer, zPointer, ctype, operation):
        """Emit scalar VFP code for z[i] = x[i] OP y[i] on float types.

        NOTE(review): despite the 'Multiply' in the name, the dispatch tables
        below only cover 'Add' and 'Subtract' — 'Multiply' would raise KeyError.
        """
        acc = DRegister() if ctype.get_size() == 8 else SRegister()
        LOAD.ELEMENT( acc, [xPointer], ctype, increment_pointer = True )
        temp = DRegister() if ctype.get_size() == 8 else SRegister()
        LOAD.ELEMENT( temp, [yPointer], ctype, increment_pointer = True )
        if ctype.get_size() == 8:
            COMPUTE = { 'Add': VADD.F64, 'Subtract': VSUB.F64 }[operation]
        else:
            COMPUTE = { 'Add': VADD.F32, 'Subtract': VSUB.F32 }[operation]
        COMPUTE( acc, temp )
        STORE.ELEMENT( [zPointer], acc, ctype, increment_pointer = True )
def PipelineMap_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, length, batch_elements, input_type, output_type, scalar_function, instruction_columns, instruction_offsets):
    """Emit the driver loop of a software-pipelined element-wise kernel z = f(x, y).

    Arguments:
        xPointer, yPointer, zPointer -- registers holding the array addresses
            (advanced by the emitted code).
        length -- register holding the element count (clobbered).
        batch_elements -- elements consumed by one unrolled batch iteration.
        input_type, output_type -- element types, used for alignment checks.
        scalar_function -- callback that emits one-element code (tail loop).
        instruction_columns -- one InstructionStream per pipeline stage.
        instruction_offsets -- software-pipelining skew of each column.

    The emitted function returns 0 in r0 on success, 1 for a null pointer,
    and 2 for a misaligned pointer.
    """
    # Check that we have an offset for each instruction column
    assert len(instruction_columns) == len(instruction_offsets)
    max_instructions = max(map(len, instruction_columns))
    return_ok = Label("return_ok")
    return_null_pointer = Label("return_null_pointer")
    return_misaligned_pointer = Label("return_misaligned_pointer")
    return_any = Label("return")
    batch_process_finish = Label("batch_process_finish")
    process_single = Label("process_single")
    process_batch = Label("process_batch")
    process_batch_prologue = Label("process_batch_prologue")
    process_batch_epilogue = Label("process_batch_epilogue")
    # Check parameters
    TST( xPointer, xPointer )
    BEQ( return_null_pointer )
    if input_type.get_size() != 1:
        TST( xPointer, input_type.get_size() - 1 )
        BNE( return_misaligned_pointer )
    TST( yPointer, yPointer )
    BEQ( return_null_pointer )
    if input_type.get_size() != 1:
        TST( yPointer, input_type.get_size() - 1 )
        BNE( return_misaligned_pointer )
    TST( zPointer, zPointer )
    BEQ( return_null_pointer )
    if output_type.get_size() != 1:
        TST( zPointer, output_type.get_size() - 1 )
        BNE( return_misaligned_pointer )
    SUBS( length, batch_elements )
    BLO( batch_process_finish )
    # Pipeline prologue: ramp the columns up according to their offsets.
    LABEL( process_batch_prologue )
    for i in range(max_instructions):
        for instruction_column, instruction_offset in zip(instruction_columns, instruction_offsets):
            if i >= instruction_offset:
                Function.get_current().add_instruction(instruction_column[i - instruction_offset])
    SUBS( length, batch_elements )
    BLO( process_batch_epilogue )
    # Steady state: all columns active, each rotated by its offset.
    LABEL( process_batch )
    for i in range(max_instructions):
        for instruction_column, instruction_offset in zip(instruction_columns, instruction_offsets):
            Function.get_current().add_instruction(instruction_column[(i - instruction_offset) % max_instructions])
    SUBS( length, batch_elements )
    BHS( process_batch )
    # Pipeline epilogue: drain the columns that still lag behind.
    LABEL( process_batch_epilogue )
    for i in range(max_instructions):
        for instruction_column, instruction_offset in zip(instruction_columns, instruction_offsets):
            if i < instruction_offset:
                Function.get_current().add_instruction(instruction_column[(i - instruction_offset) % max_instructions])
    LABEL( batch_process_finish )
    # length went negative above; restore the remainder element count.
    ADDS( length, batch_elements )
    BEQ( return_ok )
    # Scalar tail loop for the remaining elements.
    LABEL( process_single )
    scalar_function(xPointer, yPointer, zPointer)
    SUBS( length, 1 )
    BNE( process_single )
    LABEL( return_ok )
    MOV( r0, 0 )
    LABEL( return_any )
    RETURN()
    LABEL( return_null_pointer )
    RETURN( 1 )
    if input_type.get_size() != 1 or output_type.get_size() != 1:
        LABEL( return_misaligned_pointer )
        RETURN( 2 )
def PipelineMap_VXusfVSusf_VYusf(xPointer, y, zPointer, length, batch_elements, input_type, output_type, scalar_function, instruction_columns, instruction_offsets):
    """Emit the driver loop of a software-pipelined kernel z = f(x, y-scalar).

    Same structure as PipelineMap_VXusfVXusf_VYusf, but the second operand
    `y` is a scalar held in a register, so no null/alignment checks are
    emitted for it.
    """
    # Check that we have an offset for each instruction column
    assert len(instruction_columns) == len(instruction_offsets)
    max_instructions = max(map(len, instruction_columns))
    return_ok = Label("return_ok")
    return_null_pointer = Label("return_null_pointer")
    return_misaligned_pointer = Label("return_misaligned_pointer")
    return_any = Label("return")
    batch_process_finish = Label("batch_process_finish")
    process_single = Label("process_single")
    process_batch = Label("process_batch")
    process_batch_prologue = Label("process_batch_prologue")
    process_batch_epilogue = Label("process_batch_epilogue")
    # Check parameters
    TST( xPointer, xPointer )
    BEQ( return_null_pointer )
    if input_type.get_size() != 1:
        TST( xPointer, input_type.get_size() - 1 )
        BNE( return_misaligned_pointer )
    TST( zPointer, zPointer )
    BEQ( return_null_pointer )
    if output_type.get_size() != 1:
        TST( zPointer, output_type.get_size() - 1 )
        BNE( return_misaligned_pointer )
    SUBS( length, batch_elements )
    BLO( batch_process_finish )
    # Pipeline prologue: ramp the columns up according to their offsets.
    LABEL( process_batch_prologue )
    for i in range(max_instructions):
        for instruction_column, instruction_offset in zip(instruction_columns, instruction_offsets):
            if i >= instruction_offset:
                Function.get_current().add_instruction(instruction_column[i - instruction_offset])
    SUBS( length, batch_elements )
    BLO( process_batch_epilogue )
    # Steady state: all columns active, each rotated by its offset.
    LABEL( process_batch )
    for i in range(max_instructions):
        for instruction_column, instruction_offset in zip(instruction_columns, instruction_offsets):
            Function.get_current().add_instruction(instruction_column[(i - instruction_offset) % max_instructions])
    SUBS( length, batch_elements )
    BHS( process_batch )
    # Pipeline epilogue: drain the columns that still lag behind.
    LABEL( process_batch_epilogue )
    for i in range(max_instructions):
        for instruction_column, instruction_offset in zip(instruction_columns, instruction_offsets):
            if i < instruction_offset:
                Function.get_current().add_instruction(instruction_column[(i - instruction_offset) % max_instructions])
    LABEL( batch_process_finish )
    # length went negative above; restore the remainder element count.
    ADDS( length, batch_elements )
    BEQ( return_ok )
    # Scalar tail loop for the remaining elements.
    LABEL( process_single )
    scalar_function(xPointer, y, zPointer)
    SUBS( length, 1 )
    BNE( process_single )
    LABEL( return_ok )
    MOV( r0, 0 )
    LABEL( return_any )
    RETURN()
    LABEL( return_null_pointer )
    RETURN( 1 )
    if input_type.get_size() != 1 or output_type.get_size() != 1:
        LABEL( return_misaligned_pointer )
        RETURN( 2 )
def AddSubMul_VXusVXus_VXus_NEON(codegen, function_signature, module, function, arguments, assembly_cache = dict(), error_diagnostics_mode = False):
    """Generate yepCore Add/Subtract NEON kernels with matching input/output types.

    NOTE(review): `assembly_cache`'s mutable default is shared across calls --
    presumably intentional (caches generated assembly between invocations);
    confirm before changing.
    """
    if codegen.abi.name in ['arm-softeabi', 'arm-hardeabi']:
        if module == 'Core':
            if function in ['Add', 'Subtract']:
                x_argument, y_argument, z_argument, length_argument = tuple(arguments)
                if function_signature in ['V8sV8s_V8s', 'V16sV16s_V16s', 'V32sV32s_V32s', 'V64sV64s_V64s', 'V32fV32f_V32f']:
                    # NOTE(review): this guard is vacuous -- function is already
                    # restricted to Add/Subtract above, so the condition always holds.
                    if function != "Multiply" or function_signature != 'V64sV64s_V64s':
                        ctype = x_argument.get_type().get_primitive_type()
                else:
                    return
                # Scalar tail callback: one element per iteration.
                def PROCESS_SCALAR(xPointer, yPointer, zPointer):
                    SCALAR.AddSubtractMultiply_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, ctype, ctype, function)
                VLOAD = { 1: VLD1.I8, 2: VLD1.I16, 4: VLD1.I32, 8: VLD1.I64 }[ctype.get_size()]
                VSTORE = { 1: VST1.I8, 2: VST1.I16, 4: VST1.I32, 8: VST1.I64 }[ctype.get_size()]
                if ctype.is_integer():
                    if function == 'Add':
                        VCOMPUTE = { 1: VADD.I8, 2: VADD.I16, 4: VADD.I32, 8: VADD.I64 }[ctype.get_size()]
                    elif function == 'Subtract':
                        VCOMPUTE = { 1: VSUB.I8, 2: VSUB.I16, 4: VSUB.I32, 8: VSUB.I64 }[ctype.get_size()]
                    elif function == 'Multiply':
                        VCOMPUTE = { 1: VMUL.I8, 2: VMUL.I16, 4: VMUL.I32 }[ctype.get_size()]
                elif ctype.is_floating_point():
                    VCOMPUTE = { 'Add': VADD.F32, 'Subtract': VSUB.F32, 'Multiply': VMUL.F32 }[function]
                with Function(codegen, "yep" + module + "_" + function + "_" + function_signature, arguments, 'CortexA9', assembly_cache = assembly_cache, collect_origin = bool(error_diagnostics_mode), check_only = bool(error_diagnostics_mode)):
                    xPointer, yPointer, zPointer, length = LOAD.PARAMETERS()
                    unroll_registers = 6
                    register_size = 16
                    # NOTE(review): true division -- this yields a float on
                    # Python 3; the codegen appears to target Python 2 (`/` as
                    # integer division). Confirm before porting.
                    batch_elements = unroll_registers * register_size / ctype.get_size()
                    Qx = [QRegister() for _ in range(unroll_registers)]
                    Qy = [QRegister() for _ in range(unroll_registers)]
                    instruction_offsets = (0, 0, 1, 1, 2)
                    instruction_columns = [InstructionStream() for _ in range(5)]
                    # Five pipeline columns: load x, load y, compute even pair,
                    # compute odd pair, store result.
                    for i in range(0, unroll_registers, 2):
                        with instruction_columns[0]:
                            VLOAD( (Qx[i].get_low_part(), Qx[i].get_high_part(), Qx[i+1].get_low_part(), Qx[i+1].get_high_part()), [xPointer.wb()] )
                        with instruction_columns[1]:
                            VLOAD( (Qy[i].get_low_part(), Qy[i].get_high_part(), Qy[i+1].get_low_part(), Qy[i+1].get_high_part()), [yPointer.wb()] )
                        with instruction_columns[2]:
                            VCOMPUTE( Qx[i], Qy[i] )
                        with instruction_columns[3]:
                            VCOMPUTE( Qx[i+1], Qy[i+1] )
                        with instruction_columns[4]:
                            VSTORE( (Qx[i].get_low_part(), Qx[i].get_high_part(), Qx[i+1].get_low_part(), Qx[i+1].get_high_part()), [zPointer.wb()] )
                    PipelineMap_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, length, batch_elements, ctype, ctype, PROCESS_SCALAR, instruction_columns, instruction_offsets)
def AddSubMul_VXusVXus_VYus_NEON(codegen, function_signature, module, function, arguments, assembly_cache = dict(), error_diagnostics_mode = False):
    """Generate widening yepCore Add/Subtract NEON kernels (output wider than input).

    Uses VADDL/VSUBL (and VMULL dispatch tables, although 'Multiply' is
    filtered out by the guard below) to produce double-width results.
    NOTE(review): `assembly_cache`'s mutable default is shared across calls --
    presumably intentional caching; confirm before changing.
    """
    if codegen.abi.name in ['arm-softeabi', 'arm-hardeabi']:
        if module == 'Core':
            if function in ['Add', 'Subtract']:
                x_argument, y_argument, z_argument, length_argument = tuple(arguments)
                if function_signature in ['V8uV8u_V16u', 'V16uV16u_V32u',
                                          'V8sV8s_V16s', 'V16sV16s_V32s']:
                    input_type = x_argument.get_type().get_primitive_type()
                    output_type = z_argument.get_type().get_primitive_type()
                else:
                    return
                # Scalar tail callback: one element per iteration.
                def PROCESS_SCALAR(xPointer, yPointer, zPointer):
                    SCALAR.AddSubtractMultiply_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, input_type, output_type, function)
                VLOAD = { 1: VLD1.I8, 2: VLD1.I16, 4: VLD1.I32, 8: VLD1.I64 }[input_type.get_size()]
                VSTORE = { 1: VST1.I8, 2: VST1.I16, 4: VST1.I32, 8: VST1.I64 }[input_type.get_size()]
                # NOTE(review): the 'Multiply' branch below is unreachable given
                # the Add/Subtract filter above.
                if function == 'Add':
                    if input_type.is_signed_integer():
                        VCOMPUTE = { 1: VADDL.S8, 2: VADDL.S16, 4: VADDL.S32 }[input_type.get_size()]
                    else:
                        VCOMPUTE = { 1: VADDL.U8, 2: VADDL.U16, 4: VADDL.U32 }[input_type.get_size()]
                elif function == 'Subtract':
                    if input_type.is_signed_integer():
                        VCOMPUTE = { 1: VSUBL.S8, 2: VSUBL.S16, 4: VSUBL.S32 }[input_type.get_size()]
                    else:
                        VCOMPUTE = { 1: VSUBL.U8, 2: VSUBL.U16, 4: VSUBL.U32 }[input_type.get_size()]
                elif function == 'Multiply':
                    if input_type.is_signed_integer():
                        VCOMPUTE = { 1: VMULL.S8, 2: VMULL.S16, 4: VMULL.S32 }[input_type.get_size()]
                    else:
                        VCOMPUTE = { 1: VMULL.U8, 2: VMULL.U16, 4: VMULL.U32 }[input_type.get_size()]
                with Function(codegen, "yep" + module + "_" + function + "_" + function_signature, arguments, 'CortexA9', assembly_cache = assembly_cache, collect_origin = bool(error_diagnostics_mode), check_only = bool(error_diagnostics_mode)):
                    xPointer, yPointer, zPointer, length = LOAD.PARAMETERS()
                    unroll_registers = 3
                    register_size = 16
                    # NOTE(review): true division -- float on Python 3; the
                    # codegen appears to target Python 2 semantics.
                    batch_elements = unroll_registers * register_size / input_type.get_size()
                    Qx = [QRegister() for _ in range(unroll_registers)]
                    Qy = [QRegister() for _ in range(unroll_registers)]
                    # Widening ops need twice as many output registers.
                    Qz = [QRegister() for _ in range(unroll_registers * 2)]
                    instruction_offsets = (0, 0, 1, 1, 2)
                    instruction_columns = [InstructionStream() for _ in range(5)]
                    # Five pipeline columns: load x, load y, widen-compute low
                    # halves, widen-compute high halves, store both results.
                    for i in range(0, unroll_registers):
                        with instruction_columns[0]:
                            VLOAD( (Qx[i].get_low_part(), Qx[i].get_high_part()), [xPointer.wb()] )
                        with instruction_columns[1]:
                            VLOAD( (Qy[i].get_low_part(), Qy[i].get_high_part()), [yPointer.wb()] )
                        with instruction_columns[2]:
                            VCOMPUTE( Qz[2*i], Qx[i].get_low_part(), Qy[i].get_low_part() )
                        with instruction_columns[3]:
                            VCOMPUTE( Qz[2*i+1], Qx[i].get_high_part(), Qy[i].get_high_part() )
                        with instruction_columns[4]:
                            VSTORE( (Qz[2*i].get_low_part(), Qz[2*i].get_high_part(), Qz[2*i+1].get_low_part(), Qz[2*i+1].get_high_part()), [zPointer.wb()] )
                    PipelineMap_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, length, batch_elements, input_type, output_type, PROCESS_SCALAR, instruction_columns, instruction_offsets)
def AddSubMul_VXusVXus_VXus_VFPv3(codegen, function_signature, module, function, arguments, assembly_cache = dict(), error_diagnostics_mode = False):
    """Generate yepCore Add/Subtract/Multiply kernels for V64f using VFPv3 (not NEON).

    NOTE(review): `assembly_cache`'s mutable default is shared across calls --
    presumably intentional caching; confirm before changing.
    """
    if codegen.abi.name in ['arm-softeabi', 'arm-hardeabi']:
        if module == 'Core':
            if function in ['Add', 'Subtract', 'Multiply']:
                x_argument, y_argument, z_argument, length_argument = tuple(arguments)
                if function_signature in ['V64fV64f_V64f']:
                    ctype = x_argument.get_type().get_primitive_type()
                else:
                    return
                # Scalar tail callback: one element per iteration.
                def PROCESS_SCALAR(xPointer, yPointer, zPointer):
                    SCALAR.AddSubtractMultiply_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, ctype, ctype, function)
                VCOMPUTE = { ('Add', 4): VADD.F32, ('Subtract', 4): VSUB.F32, ('Multiply', 4): VMUL.F32,
                             ('Add', 8): VADD.F64, ('Subtract', 8): VSUB.F64, ('Multiply', 8): VMUL.F64 }[function, ctype.get_size()]
                with Function(codegen, "yep" + module + "_" + function + "_" + function_signature, arguments, 'CortexA9', assembly_cache = assembly_cache, collect_origin = bool(error_diagnostics_mode), check_only = bool(error_diagnostics_mode)):
                    xPointer, yPointer, zPointer, length = LOAD.PARAMETERS()
                    unroll_registers = { 4: 12, 8: 8 }[ctype.get_size()]
                    # S registers for single precision, D registers for double.
                    SDx = [{ 4: SRegister(), 8: DRegister() }[ctype.get_size()] for _ in range(unroll_registers)]
                    SDy = [{ 4: SRegister(), 8: DRegister() }[ctype.get_size()] for _ in range(unroll_registers)]
                    instruction_offsets = { 4: (0, 1, 3, 4, 5), 8: (0, 0, 1, 2, 3) }[ctype.get_size()]
                    instruction_columns = [InstructionStream() for _ in range(5)]
                    # Five pipeline columns: load x pair (VLDM), load y pair,
                    # compute even element, compute odd element, store pair.
                    for i in range(0, unroll_registers, 2):
                        with instruction_columns[0]:
                            VLDM( xPointer.wb(), tuple(SDx[i:i+2]) )
                        with instruction_columns[1]:
                            VLDM( yPointer.wb(), tuple(SDy[i:i+2]) )
                        with instruction_columns[2]:
                            VCOMPUTE( SDx[i], SDy[i] )
                        with instruction_columns[3]:
                            VCOMPUTE( SDx[i+1], SDy[i+1] )
                        with instruction_columns[4]:
                            VSTM( zPointer.wb(), tuple(SDx[i:i+2]) )
                    # One element per register here, so the batch size equals
                    # the number of unrolled registers.
                    PipelineMap_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, length, unroll_registers, ctype, ctype, PROCESS_SCALAR, instruction_columns, instruction_offsets)
def MinMax_VXusVXus_VXus_NEON(codegen, function_signature, module, function, arguments, assembly_cache = dict(), error_diagnostics_mode = False):
    """Generate yepCore Min/Max NEON kernels for 8/16/32-bit integer vectors.

    NOTE(review): `assembly_cache`'s mutable default is shared across calls --
    presumably intentional caching; confirm before changing.
    """
    if codegen.abi.name in ['arm-softeabi', 'arm-hardeabi']:
        if module == 'Core':
            if function in ['Min', 'Max']:
                x_argument, y_argument, z_argument, length_argument = tuple(arguments)
                if function_signature in ['V8uV8u_V8u', 'V16uV16u_V16u', 'V32uV32u_V32u', 'V8sV8s_V8s', 'V16sV16s_V16s', 'V32sV32s_V32s']:
                    ctype = x_argument.get_type().get_primitive_type()
                else:
                    return
                # Scalar tail callback: one element per iteration.
                def PROCESS_SCALAR(xPointer, yPointer, zPointer):
                    SCALAR.MinMax_VXusVXus_VYus(xPointer, yPointer, zPointer, ctype, function)
                VLOAD = { 1: VLD1.I8, 2: VLD1.I16, 4: VLD1.I32, 8: VLD1.I64 }[ctype.get_size()]
                VSTORE = { 1: VST1.I8, 2: VST1.I16, 4: VST1.I32, 8: VST1.I64 }[ctype.get_size()]
                # Pick signed/unsigned VMIN/VMAX variant by element type.
                if function == 'Min':
                    if ctype.is_unsigned_integer():
                        VCOMPUTE = { 1: VMIN.U8, 2: VMIN.U16, 4: VMIN.U32 }[ctype.get_size()]
                    else:
                        VCOMPUTE = { 1: VMIN.S8, 2: VMIN.S16, 4: VMIN.S32 }[ctype.get_size()]
                elif function == 'Max':
                    if ctype.is_unsigned_integer():
                        VCOMPUTE = { 1: VMAX.U8, 2: VMAX.U16, 4: VMAX.U32 }[ctype.get_size()]
                    else:
                        VCOMPUTE = { 1: VMAX.S8, 2: VMAX.S16, 4: VMAX.S32 }[ctype.get_size()]
                with Function(codegen, "yep" + module + "_" + function + "_" + function_signature, arguments, 'CortexA9', assembly_cache = assembly_cache, collect_origin = bool(error_diagnostics_mode), check_only = bool(error_diagnostics_mode)):
                    xPointer, yPointer, zPointer, length = LOAD.PARAMETERS()
                    unroll_registers = 6
                    register_size = 16
                    # NOTE(review): true division -- float on Python 3; the
                    # codegen appears to target Python 2 semantics.
                    batch_elements = unroll_registers * register_size / ctype.get_size()
                    Qx = [QRegister() for _ in range(unroll_registers)]
                    Qy = [QRegister() for _ in range(unroll_registers)]
                    instruction_offsets = (0, 0, 1, 1, 2)
                    instruction_columns = [InstructionStream() for _ in range(5)]
                    # Five pipeline columns: load x, load y, compute even pair,
                    # compute odd pair, store result.
                    for i in range(0, unroll_registers, 2):
                        with instruction_columns[0]:
                            VLOAD( (Qx[i].get_low_part(), Qx[i].get_high_part(), Qx[i+1].get_low_part(), Qx[i+1].get_high_part()), [xPointer.wb()] )
                        with instruction_columns[1]:
                            VLOAD( (Qy[i].get_low_part(), Qy[i].get_high_part(), Qy[i+1].get_low_part(), Qy[i+1].get_high_part()), [yPointer.wb()] )
                        with instruction_columns[2]:
                            VCOMPUTE( Qx[i], Qy[i] )
                        with instruction_columns[3]:
                            VCOMPUTE( Qx[i+1], Qy[i+1] )
                        with instruction_columns[4]:
                            VSTORE( (Qx[i].get_low_part(), Qx[i].get_high_part(), Qx[i+1].get_low_part(), Qx[i+1].get_high_part()), [zPointer.wb()] )
                    PipelineMap_VXusfVXusf_VYusf(xPointer, yPointer, zPointer, length, batch_elements, ctype, ctype, PROCESS_SCALAR, instruction_columns, instruction_offsets)
| 43.805139
| 234
| 0.676443
| 2,635
| 20,457
| 4.995066
| 0.088805
| 0.02606
| 0.023401
| 0.021881
| 0.874335
| 0.855113
| 0.81287
| 0.782784
| 0.760903
| 0.733323
| 0
| 0.029753
| 0.194946
| 20,457
| 466
| 235
| 43.899142
| 0.769446
| 0.014812
| 0
| 0.706044
| 0
| 0
| 0.046858
| 0.007021
| 0
| 0
| 0
| 0
| 0.008242
| 1
| 0.035714
| false
| 0
| 0.002747
| 0
| 0.052198
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e2124461be56773720b9169c7daa0aa7904c8ebd
| 40,405
|
py
|
Python
|
kernel/conv.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | 1
|
2021-11-29T23:51:15.000Z
|
2021-11-29T23:51:15.000Z
|
kernel/conv.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | null | null | null |
kernel/conv.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | null | null | null |
import torch
import math
from UnarySim.stream.gen import RNG, RNGMulti, SourceGen, BSGen, BSGenMulti
from UnarySim.kernel.utils import conv2d_output_shape, num2tuple
from UnarySim.kernel.linear import HUBLinearFunction
from UnarySim.kernel.linear import HUBLinearFunction_flex
from UnarySim.kernel.linear import FxpLinearFunction
from UnarySim.kernel.linear import TlutLinearFunction
from UnarySim.kernel.add import FSUAdd
from torch.cuda.amp import autocast
class FSUConv2d(torch.nn.Module):
    """
    This module is for convolution with unary input and output.

    Composes FSUConv2dPC (per-cycle parallel-counter partial sums) with
    FSUAdd (stream accumulation) into one unary conv layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 mode="bipolar",
                 scaled=True,
                 scale=None,
                 depth=12,
                 btype=torch.float,
                 rtype=torch.float,
                 stype=torch.float):
        super(FSUConv2d, self).__init__()
        # stype: dtype of the emitted output bit stream
        self.stype = stype
        self.PC = FSUConv2dPC(in_channels,
                              out_channels,
                              kernel_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              bias=bias,
                              padding_mode=padding_mode,
                              binary_weight=binary_weight,
                              binary_bias=binary_bias,
                              bitwidth=bitwidth,
                              mode=mode,
                              btype=btype,
                              rtype=rtype,
                              stype=stype)
        # Default accumulation scale is the fan-in of one output element:
        # kernel area * input channels, plus 1 when bias is enabled (`bias`
        # is a bool added as 0/1).
        if scaled is True:
            if scale is None:
                scale_add = math.prod(num2tuple(kernel_size)) * in_channels + bias
            else:
                scale_add = scale
        else:
            scale_add = 1.0
        self.ACC = FSUAdd(mode=mode,
                          scaled=scaled,
                          scale=scale_add,
                          dim=0,
                          depth=depth,
                          entry=math.prod(num2tuple(kernel_size)) * in_channels + bias,
                          stype=stype)
    @autocast()
    def forward(self, input, scale=None, entry=None):
        """One unary cycle: partial counts from PC, then stream accumulation."""
        pc = self.PC(input)
        output = self.ACC(pc.unsqueeze(0), scale, entry)
        return output.type(self.stype)
class FSUConv2dPC(torch.nn.Conv2d):
    """
    This module is for convolution with unary input and output.

    Produces the per-cycle parallel-counter (PC) partial sums of a unary
    conv layer; cross-cycle accumulation is done by the caller (FSUConv2d).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 mode="bipolar",
                 btype=torch.float,
                 rtype=torch.float,
                 stype=torch.float):
        super(FSUConv2dPC, self).__init__(in_channels, out_channels, kernel_size,
                                          stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.has_bias = bias
        self.mode = mode
        self.stype = stype
        self.btype = btype
        self.rtype = rtype
        assert groups==1, "Supported group number is 1."
        assert padding_mode=='zeros', "Supported padding_mode number is 'zeros'."
        self.mode = mode
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG
        self.rng = RNG(self.bitwidth, 1, "Sobol")()
        # define the linear weight and bias
        if binary_weight is not None:
            self.weight.data = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        if bias and (binary_bias is not None):
            self.bias.data = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        # define the kernel linear
        self.weight_bsg = BSGen(self.weight.view(1, self.weight.size()[0], -1), self.rng, stype=stype)
        self.weight_rng_idx = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).view(1, self.weight.size()[0], -1)
        if self.has_bias is True:
            self.bias_bsg = BSGen(self.bias, self.rng, stype=stype)
            self.bias_rng_idx = torch.nn.Parameter(torch.zeros_like(self.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.weight_bsg_inv = BSGen(self.weight.view(1, self.weight.size()[0], -1), self.rng, stype=stype)
            self.weight_rng_idx_inv = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).view(1, self.weight.size()[0], -1)
        # indicator of even/odd cycle
        self.even_cycle_flag = torch.nn.Parameter(torch.ones(1, dtype=torch.bool), requires_grad=False)
        # Two pad variants: bipolar mode alternates 0/1 padding per cycle so
        # the padded region encodes a zero-valued bipolar bit stream.
        self.padding_0 = torch.nn.ConstantPad2d(self.padding, 0)
        self.padding_1 = torch.nn.ConstantPad2d(self.padding, 1)
        self.bipolar_mode = torch.nn.Parameter(torch.tensor([self.mode == "bipolar"], dtype=torch.bool), requires_grad=False)
    def FSUConv2d_PC(self, input):
        """Compute one cycle of partial sums via im2col + matmul on bit streams.

        Mutates per-call state: even_cycle_flag, weight_rng_idx(_inv), and
        (with bias) bias_rng_idx -- calling order matters.
        """
        output_size = conv2d_output_shape((input.size()[2], input.size()[3]), kernel_size=self.kernel_size, dilation=self.dilation, pad=self.padding, stride=self.stride)
        if True in self.even_cycle_flag:
            input_padding = self.padding_0(input)
        else:
            input_padding = self.padding_1(input)
        # if unipolar mode, even_cycle_flag is always False to pad 0.
        self.even_cycle_flag.data = self.bipolar_mode ^ self.even_cycle_flag
        # See the autograd section for explanation of what happens here.
        input_im2col = torch.nn.functional.unfold(input_padding, self.kernel_size, self.dilation, 0, self.stride)
        input_transpose = input_im2col.transpose(1, 2)
        input_reshape = input_transpose.reshape(-1, 1, input_transpose.size()[-1])
        # first dim should always be batch
        batch = input_reshape.size()[0]
        # generate weight and bias bits for current cycle
        weight_bs = self.weight_bsg(self.weight_rng_idx).type(torch.float)
        if weight_bs.size()[0] != batch:
            # Lazily broadcast the RNG index state to the batch size.
            weight_bs = torch.cat(batch*[weight_bs], 0)
            self.weight_rng_idx = torch.cat(batch*[self.weight_rng_idx], 0)
        # Advance the RNG index only where the input bit is 1.
        torch.add(self.weight_rng_idx, input_reshape.type(torch.long), out=self.weight_rng_idx)
        kernel_out = torch.empty(0, device=input.device)
        torch.matmul(input_reshape.type(torch.float), weight_bs.transpose(1, 2), out=kernel_out)
        kernel_out.squeeze_(1)
        kernel_out_reshape = kernel_out.reshape(input.size()[0], -1, kernel_out.size()[-1])
        kernel_out_transpose = kernel_out_reshape.transpose(1, 2)
        kernel_out_fold = torch.nn.functional.fold(kernel_out_transpose, output_size, (1, 1))
        if self.has_bias is True:
            bias_bs = self.bias_bsg(self.bias_rng_idx).type(torch.float)
            self.bias_rng_idx.add_(1)
            kernel_out_fold += bias_bs.view(1, -1, 1, 1).expand_as(kernel_out_fold)
        if self.mode == "unipolar":
            return kernel_out_fold
        if self.mode == "bipolar":
            # generate weight and bias bits for current cycle
            weight_bs_inv = 1 - self.weight_bsg_inv(self.weight_rng_idx_inv).type(torch.float)
            if weight_bs_inv.size()[0] != batch:
                weight_bs_inv = torch.cat(batch*[weight_bs_inv], 0)
                self.weight_rng_idx_inv = torch.cat(batch*[self.weight_rng_idx_inv], 0)
            # Inverse path advances where the input bit is 0.
            torch.add(self.weight_rng_idx_inv, 1 - input_reshape.type(torch.long), out=self.weight_rng_idx_inv)
            kernel_out_inv = torch.empty(0, device=input.device)
            torch.matmul(1 - input_reshape.type(torch.float), weight_bs_inv.transpose(1, 2), out=kernel_out_inv)
            kernel_out_inv.squeeze_(1)
            kernel_out_reshape_inv = kernel_out_inv.reshape(input.size()[0], -1, kernel_out_inv.size()[-1])
            kernel_out_transpose_inv = kernel_out_reshape_inv.transpose(1, 2)
            kernel_out_fold_inv = torch.nn.functional.fold(kernel_out_transpose_inv, output_size, (1, 1))
            return kernel_out_fold + kernel_out_fold_inv
    @autocast()
    def forward(self, input):
        """One cycle of partial-sum computation, cast to the stream dtype."""
        return self.FSUConv2d_PC(input).type(self.stype)
class FSUConv2duGEMM(torch.nn.Conv2d):
"""
This module is for convolution with unary input and output
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
binary_weight=None,
binary_bias=None,
bitwidth=8,
mode="bipolar",
scaled=True,
btype=torch.float,
rtype=torch.float,
stype=torch.float):
super(FSUConv2duGEMM, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.has_bias = bias
self.mode = mode
self.scaled = scaled
self.stype = stype
self.btype = btype
self.rtype = rtype
assert groups==1, "Supported group number is 1."
assert padding_mode=='zeros', "Supported padding_mode number is 'zeros'."
# upper bound for accumulation counter in scaled mode
self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
self.acc_bound.add_(math.prod(num2tuple(self.kernel_size)) * in_channels)
if bias is True:
self.acc_bound.add_(1)
self.mode = mode
self.scaled = scaled
# accumulation offset
self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
if mode == "unipolar":
pass
elif mode == "bipolar":
self.offset.add_((math.prod(num2tuple(self.kernel_size)) * in_channels-1)/2)
if bias is True:
self.offset.add_(1/2)
else:
raise ValueError("FSUConv2d mode is not implemented.")
# bias indication for original linear layer
self.has_bias = bias
# data bit width
self.bitwidth = bitwidth
# random_sequence from sobol RNG
self.rng = RNG(self.bitwidth, 1, "Sobol")()
# define the linear weight and bias
if binary_weight is not None:
self.weight.data = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
if bias and (binary_bias is not None):
self.bias.data = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
# define the kernel linear
self.weight_bsg = BSGen(self.weight.view(1, self.weight.size()[0], -1), self.rng, stype=stype)
self.weight_rng_idx = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).view(1, self.weight.size()[0], -1)
if self.has_bias is True:
self.bias_bsg = BSGen(self.bias, self.rng, stype=stype)
self.bias_rng_idx = torch.nn.Parameter(torch.zeros_like(self.bias, dtype=torch.long), requires_grad=False)
# if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
if self.mode == "bipolar":
self.weight_bsg_inv = BSGen(self.weight.view(1, self.weight.size()[0], -1), self.rng, stype=stype)
self.weight_rng_idx_inv = torch.nn.Parameter(torch.zeros_like(self.weight, dtype=torch.long), requires_grad=False).view(1, self.weight.size()[0], -1)
self.accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
if self.scaled is False:
self.out_accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
# indicator of even/odd cycle
self.even_cycle_flag = torch.nn.Parameter(torch.ones(1, dtype=torch.bool), requires_grad=False)
self.padding_0 = torch.nn.ConstantPad2d(self.padding, 0)
self.padding_1 = torch.nn.ConstantPad2d(self.padding, 1)
self.bipolar_mode = torch.nn.Parameter(torch.tensor([self.mode == "bipolar"], dtype=torch.bool), requires_grad=False)
def FSUKernel_accumulation(self, input):
output_size = conv2d_output_shape((input.size()[2], input.size()[3]), kernel_size=self.kernel_size, dilation=self.dilation, pad=self.padding, stride=self.stride)
if True in self.even_cycle_flag:
input_padding = self.padding_0(input)
else:
input_padding = self.padding_1(input)
# if unipolar mode, even_cycle_flag is always False to pad 0.
self.even_cycle_flag.data = self.bipolar_mode ^ self.even_cycle_flag
# See the autograd section for explanation of what happens here.
input_im2col = torch.nn.functional.unfold(input_padding, self.kernel_size, self.dilation, 0, self.stride)
input_transpose = input_im2col.transpose(1, 2)
input_reshape = input_transpose.reshape(-1, 1, input_transpose.size()[-1])
# first dim should always be batch
batch = input_reshape.size()[0]
# generate weight and bias bits for current cycle
weight_bs = self.weight_bsg(self.weight_rng_idx).type(torch.float)
if weight_bs.size()[0] != batch:
weight_bs = torch.cat(batch*[weight_bs], 0)
self.weight_rng_idx = torch.cat(batch*[self.weight_rng_idx], 0)
torch.add(self.weight_rng_idx, input_reshape.type(torch.long), out=self.weight_rng_idx)
kernel_out = torch.empty(0, device=input.device)
torch.matmul(input_reshape.type(torch.float), weight_bs.transpose(1, 2), out=kernel_out)
kernel_out.squeeze_(1)
kernel_out_reshape = kernel_out.reshape(input.size()[0], -1, kernel_out.size()[-1])
kernel_out_transpose = kernel_out_reshape.transpose(1, 2)
kernel_out_fold = torch.nn.functional.fold(kernel_out_transpose, output_size, (1, 1))
if self.has_bias is True:
bias_bs = self.bias_bsg(self.bias_rng_idx).type(torch.float)
self.bias_rng_idx.add_(1)
kernel_out_fold += bias_bs.view(1, -1, 1, 1).expand_as(kernel_out_fold)
if self.mode == "unipolar":
return kernel_out_fold
if self.mode == "bipolar":
# generate weight and bias bits for current cycle
weight_bs_inv = 1 - self.weight_bsg_inv(self.weight_rng_idx_inv).type(torch.float)
if weight_bs_inv.size()[0] != batch:
weight_bs_inv = torch.cat(batch*[weight_bs_inv], 0)
self.weight_rng_idx_inv = torch.cat(batch*[self.weight_rng_idx_inv], 0)
torch.add(self.weight_rng_idx_inv, 1 - input_reshape.type(torch.long), out=self.weight_rng_idx_inv)
kernel_out_inv = torch.empty(0, device=input.device)
torch.matmul(1 - input_reshape.type(torch.float), weight_bs_inv.transpose(1, 2), out=kernel_out_inv)
kernel_out_inv.squeeze_(1)
kernel_out_reshape_inv = kernel_out_inv.reshape(input.size()[0], -1, kernel_out_inv.size()[-1])
kernel_out_transpose_inv = kernel_out_reshape_inv.transpose(1, 2)
kernel_out_fold_inv = torch.nn.functional.fold(kernel_out_transpose_inv, output_size, (1, 1))
return kernel_out_fold + kernel_out_fold_inv
@autocast()
def forward(self, input):
    """Accumulate one cycle of kernel output and emit this cycle's output bit tensor."""
    # Fold this cycle's kernel accumulation into the running accumulator.
    cycle_sum = self.FSUKernel_accumulation(input)
    self.accumulator.data = self.accumulator.add(cycle_sum)
    if self.scaled is True:
        # Scaled mode: emit a bit wherever the accumulator reaches the bound,
        # then subtract the consumed portion back out.
        out_bit = torch.ge(self.accumulator, self.acc_bound).type(torch.float)
        self.accumulator.sub_(out_bit * self.acc_bound)
    else:
        # Non-scaled mode: remove the offset, then compare against the
        # output-side accumulator, which tracks bits already emitted.
        self.accumulator.sub_(self.offset)
        out_bit = torch.gt(self.accumulator, self.out_accumulator).type(torch.float)
        self.out_accumulator.data = self.out_accumulator.add(out_bit)
    return out_bit.type(self.stype)
class HUBConv2d(torch.nn.Conv2d):
    """
    This module is the 2d conv layer, with binary input and binary output
    This cycle is the mac cycle using unipolar umul, i.e., half the bipolar umul.
    As such, cycle = 2 ^ (bitwidth - 1).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 binary_weight=None,
                 binary_bias=None,
                 rng="Sobol",
                 cycle=128,
                 rounding="round"):
        """Build the layer and precompute the RNG-based input/weight value maps.

        binary_weight/binary_bias: optional pre-quantized tensors loaded into the
        layer parameters. rng: RNG type for the input stream. cycle: number of
        MAC cycles (also the value-map size). rounding: mode used to round the
        log2 data ranges in forward().
        """
        super(HUBConv2d, self).__init__(in_channels,
                                        out_channels,
                                        kernel_size,
                                        stride,
                                        padding,
                                        dilation,
                                        groups,
                                        bias,
                                        padding_mode)
        assert groups == 1, "Supported group number is 1."
        assert padding_mode == 'zeros', "Supported padding_mode number is 'zeros'."
        # weight and bias
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        # mac computing cycle
        self.cycle = cycle
        # bitwidth of rng
        self.bitwidth = (self.cycle - 1).bit_length()
        # random_sequence from sobol RNG
        self.irng = RNG(self.bitwidth, 1, rng)()
        self.wrng = RNG(self.bitwidth, 1, "Sobol")()
        # generate the value map for mul using current rng
        # dim 0 is input index
        # the tensor input value is the actual value produced by the rng
        self.input_map = torch.nn.Parameter(torch.empty(cycle), requires_grad=False)
        # every column of input_val_cycle repeats the value axis 0..cycle-1
        input_val_cycle = torch.cat(
            cycle * [torch.arange(cycle, dtype=torch.float).unsqueeze(1)], 1)
        # BUGFIX: the original wrote the comparison result into a preallocated
        # float tensor via torch.gt(..., out=...); modern PyTorch requires the
        # out tensor of a comparison to be bool, so compute the mask directly
        # and cast to float instead (identical 0/1 values).
        input_bit_cycle = torch.gt(input_val_cycle, self.irng.unsqueeze(0)).type(torch.float)
        self.input_map.data = torch.sum(input_bit_cycle, 1).squeeze_().type(torch.long)
        # dim 0 is input index, dim 1 is weight index
        # the tensor value is the actual weight value produced by the rng, under a specific input and weight
        self.wght_map = torch.nn.Parameter(torch.empty(cycle, cycle), requires_grad=False)
        wght_bit_cycle = torch.gt(input_val_cycle, self.wrng.unsqueeze(0)).type(torch.float)
        for c in range(cycle):
            self.wght_map.data[c] = torch.sum(wght_bit_cycle[:, 0:self.input_map.data[c]], 1).squeeze_()
        # rounding mode
        self.rounding = rounding
        # shift amounts are recomputed from live data on every forward pass
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None

    @autocast()
    def forward(self, input):
        """Conv2d as im2col + HUBLinearFunction matmul on range-shifted data."""
        # Derive dynamic ranges (log2 of max abs) of input and weight, rounded
        # per the configured mode, to obtain the shift amounts for this batch.
        with torch.no_grad():
            input_max_int = input.abs().max().log2()
            wght_max_int = self.weight.abs().max().log2()
            if self.rounding == "round":
                input_max_int = input_max_int.round()
                wght_max_int = wght_max_int.round()
            elif self.rounding == "floor":
                input_max_int = input_max_int.floor()
                wght_max_int = wght_max_int.floor()
            elif self.rounding == "ceil":
                input_max_int = input_max_int.ceil()
                wght_max_int = wght_max_int.ceil()
            self.rshift_input = input_max_int - self.bitwidth
            self.rshift_wght = wght_max_int - self.bitwidth
            self.rshift_output = self.bitwidth - input_max_int - wght_max_int
        # all data are in NCHW
        output_size = conv2d_output_shape((input.size()[2], input.size()[3]), kernel_size=self.kernel_size, dilation=self.dilation, pad=self.padding, stride=self.stride)
        # im2col turns the convolution into a single matrix multiplication
        input_im2col = torch.nn.functional.unfold(input, self.kernel_size, self.dilation, self.padding, self.stride)
        input_transpose = input_im2col.transpose(1, 2)
        input_reshape = input_transpose.reshape(-1, input_transpose.size()[-1])
        weight = self.weight.view(self.weight.size()[0], -1)
        mm_out = HUBLinearFunction.apply(input_reshape, weight, None, self.rshift_input, self.rshift_wght, self.rshift_output, self.cycle, self.wght_map)
        mm_out_reshape = mm_out.reshape(input.size()[0], -1, mm_out.size()[-1])
        mm_out_transpose = mm_out_reshape.transpose(1, 2)
        output = torch.nn.functional.fold(mm_out_transpose, output_size, (1, 1))
        if self.bias is None:
            return output
        else:
            return output + self.bias.view([1, self.bias.size()[0], 1, 1])
class HUBConv2d_flex(torch.nn.Conv2d):
    """
    This module is the 2d conv layer, with binary input and binary output
    This module support flexible input and weight precision
    bitwidth has to be a tuple for (input, weight)
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 binary_weight=None,
                 binary_bias=None,
                 rng="Sobol",
                 bitwidth=None,
                 rounding="round"):
        """Build the layer with independent input/weight bitwidths.

        bitwidth must be an (input, weight) tuple; one sign bit is reserved
        from each. The shorter bitstream is repeated so both streams span the
        same number of cycles.
        """
        super(HUBConv2d_flex, self).__init__(in_channels,
                                             out_channels,
                                             kernel_size,
                                             stride,
                                             padding,
                                             dilation,
                                             groups,
                                             bias,
                                             padding_mode)
        assert groups == 1, "Supported group number is 1."
        assert padding_mode == 'zeros', "Supported padding_mode number is 'zeros'."
        # weight and bias
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        if isinstance(bitwidth, tuple):
            self.bw_input, self.bw_wght = (bitwidth[0]-1, bitwidth[1]-1)
        else:
            raise ValueError("HUBConv2dFlex layer only supports explict bitwidth tuple assignment.")
        # whichever operand has the smaller bitwidth gets its bitstream
        # repeated so both streams cover the same cycle count
        ratio = int(2**max(self.bw_wght, self.bw_input) / 2**min(self.bw_wght, self.bw_input))
        self.max_bw = max(self.bw_wght, self.bw_input)
        cycle = 2 ** self.max_bw
        input_repeat = 1
        wght_repeat = 1
        if self.bw_input > self.bw_wght:
            wght_repeat = ratio
        elif self.bw_input < self.bw_wght:
            input_repeat = ratio
        # random_sequence from sobol RNG
        self.irng = RNG(self.bw_input, 1, rng)().repeat(input_repeat)   # temporal input
        self.wrng = RNG(self.bw_wght, 1, "Sobol")().repeat(wght_repeat)  # rate weight
        # generate the value map for mul using current rng
        # dim 0 is input index
        # the tensor input value is the actual value produced by the rng
        self.input_map = torch.nn.Parameter(torch.empty(cycle), requires_grad=False)
        # every column of input_val_cycle repeats the value axis 0..cycle-1
        input_val_cycle = torch.cat(
            cycle * [torch.arange(cycle, dtype=torch.float).unsqueeze(1)], 1)
        # BUGFIX: the original wrote the comparison result into a preallocated
        # float tensor via torch.gt(..., out=...); modern PyTorch requires the
        # out tensor of a comparison to be bool, so compute the mask directly
        # and cast to float instead (identical 0/1 values).
        input_bit_cycle = torch.gt(input_val_cycle, self.irng.unsqueeze(0)).type(torch.float)
        self.input_map.data = torch.sum(input_bit_cycle, 1).squeeze_().type(torch.long)
        # dim 0 is input index, dim 1 is weight index
        # the tensor value is the actual weight value produced by the rng, under a specific input and weight
        self.wght_map = torch.nn.Parameter(torch.empty(cycle, cycle), requires_grad=False)
        wght_bit_cycle = torch.gt(input_val_cycle, self.wrng.unsqueeze(0)).type(torch.float)
        for c in range(cycle):
            self.wght_map.data[c] = torch.sum(wght_bit_cycle[:, 0:self.input_map.data[c]], 1).squeeze_()
        # rounding mode
        self.rounding = rounding
        # shift amounts are recomputed from live data on every forward pass
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None

    @autocast()
    def forward(self, input):
        """Conv2d as im2col + HUBLinearFunction_flex matmul on range-shifted data."""
        # Derive dynamic ranges (log2 of max abs) of input and weight, rounded
        # per the configured mode, to obtain per-operand shift amounts.
        with torch.no_grad():
            input_max_int = input.abs().max().log2()
            wght_max_int = self.weight.abs().max().log2()
            if self.rounding == "round":
                input_max_int = input_max_int.round()
                wght_max_int = wght_max_int.round()
            elif self.rounding == "floor":
                input_max_int = input_max_int.floor()
                wght_max_int = wght_max_int.floor()
            elif self.rounding == "ceil":
                input_max_int = input_max_int.ceil()
                wght_max_int = wght_max_int.ceil()
            self.rshift_input = input_max_int - self.bw_input
            self.rshift_wght = wght_max_int - self.bw_wght
            self.rshift_output = self.max_bw - input_max_int - wght_max_int
        # all data are in NCHW
        output_size = conv2d_output_shape((input.size()[2], input.size()[3]), kernel_size=self.kernel_size, dilation=self.dilation, pad=self.padding, stride=self.stride)
        # im2col turns the convolution into a single matrix multiplication
        input_im2col = torch.nn.functional.unfold(input, self.kernel_size, self.dilation, self.padding, self.stride)
        input_transpose = input_im2col.transpose(1, 2)
        input_reshape = input_transpose.reshape(-1, input_transpose.size()[-1])
        weight = self.weight.view(self.weight.size()[0], -1)
        mm_out = HUBLinearFunction_flex.apply(input_reshape, weight, None, self.rshift_input, self.rshift_wght, self.rshift_output, self.bw_input, self.bw_wght, self.wght_map)
        mm_out_reshape = mm_out.reshape(input.size()[0], -1, mm_out.size()[-1])
        mm_out_transpose = mm_out_reshape.transpose(1, 2)
        output = torch.nn.functional.fold(mm_out_transpose, output_size, (1, 1))
        if self.bias is None:
            return output
        else:
            return output + self.bias.view([1, self.bias.size()[0], 1, 1])
class TlutConv2d(torch.nn.Conv2d):
    """
    This module is the 2d conv layer, with binary input and binary output
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 cycle=None,
                 rounding="round"):
        """Build the layer.

        bitwidth: either an (input, weight) tuple or a single int applied to
        both operands; one sign bit is reserved. cycle: optional
        early-termination cycle count that caps the input clamp value.
        """
        super(TlutConv2d, self).__init__(in_channels,
                                         out_channels,
                                         kernel_size,
                                         stride,
                                         padding,
                                         dilation,
                                         groups,
                                         bias,
                                         padding_mode)
        assert groups == 1, "Supported group number is 1."
        assert padding_mode == 'zeros', "Supported padding_mode number is 'zeros'."
        # weight and bias
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        # bitwidth of abs
        if isinstance(bitwidth, tuple):
            self.bw_input, self.bw_wght = (bitwidth[0]-1, bitwidth[1]-1)
        else:
            # BUGFIX: the original never handled a plain-int bitwidth (the
            # documented default, 8), leaving bw_input/bw_wght unset and
            # raising AttributeError on the max-abs computation below.
            # Mirror FxpConv2d's keep_res="input" behavior: both operands
            # get bitwidth-1 magnitude bits.
            self.bw_input, self.bw_wght = (bitwidth-1, bitwidth-1)
        # max abs value
        self.max_abs_input = 2**self.bw_input
        self.max_abs_wght = 2**self.bw_wght
        # rounding mode
        self.rounding = rounding
        # early termination cycle
        self.cycle = cycle
        # shift amounts are derived lazily from the first batch seen
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None

    @autocast()
    def forward(self, input):
        """Conv2d as im2col + TlutLinearFunction matmul on range-shifted data."""
        with torch.no_grad():
            # Preparing input shift value (cached after the first call)
            if self.rshift_input is None:
                input_max_int = input.abs().max().log2()
                if self.rounding == "round":
                    input_max_int = input_max_int.round()
                elif self.rounding == "floor":
                    input_max_int = input_max_int.floor()
                elif self.rounding == "ceil":
                    input_max_int = input_max_int.ceil()
                self.rshift_input = input_max_int - self.bw_input
            # Preparing weight shift value (cached after the first call)
            if self.rshift_wght is None:
                wght_max_int = self.weight.abs().max().log2()
                if self.rounding == "round":
                    wght_max_int = wght_max_int.round()
                elif self.rounding == "floor":
                    wght_max_int = wght_max_int.floor()
                elif self.rounding == "ceil":
                    wght_max_int = wght_max_int.ceil()
                self.rshift_wght = wght_max_int - self.bw_wght
            # Preparing output shift value
            if self.rshift_output is None:
                self.rshift_output = 0 - self.rshift_input - self.rshift_wght
            # Preparing input clamp value based on cycle (early termination)
            self.input_clamp_val = 2**self.bw_input
            if self.cycle is not None and self.cycle < 2**self.bw_input-1:
                self.input_clamp_val = self.cycle
        # Precompute output kernel size based on filter size, padding, dilation, stride, etc.
        # all data are in NCHW
        output_size = conv2d_output_shape((input.size()[2], input.size()[3]), kernel_size=self.kernel_size, dilation=self.dilation, pad=self.padding, stride=self.stride)
        # im2col turns the convolution into a single matrix multiplication
        input_im2col = torch.nn.functional.unfold(input, self.kernel_size, self.dilation, self.padding, self.stride)
        input_transpose = input_im2col.transpose(1, 2)
        input_reshape = input_transpose.reshape(-1, input_transpose.size()[-1])
        weight = self.weight.view(self.weight.size()[0], -1)
        mm_out = TlutLinearFunction.apply(input_reshape, weight, None, self.rshift_input, self.rshift_wght, self.rshift_output, self.max_abs_input, self.max_abs_wght, self.input_clamp_val)
        mm_out_reshape = mm_out.reshape(input.size()[0], -1, mm_out.size()[-1])
        mm_out_transpose = mm_out_reshape.transpose(1, 2)
        output = torch.nn.functional.fold(mm_out_transpose, output_size, (1, 1))
        if self.bias is None:
            return output
        else:
            return output + self.bias.view([1, self.bias.size()[0], 1, 1])
class FxpConv2d(torch.nn.Conv2d):
    """
    This module is the 2d conv layer, with binary input and binary output
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 keep_res="input",  # keep the resolution of input/output
                 more_res="input",  # assign more resolution to input/weight
                 rounding="round"):
        super().__init__(in_channels,
                         out_channels,
                         kernel_size,
                         stride,
                         padding,
                         dilation,
                         groups,
                         bias,
                         padding_mode)
        assert groups == 1, "Supported group number is 1."
        assert padding_mode == 'zeros', "Supported padding_mode number is 'zeros'."
        # Optionally preload quantized weight/bias tensors.
        if binary_weight is not None:
            self.weight.data = binary_weight
        if bias and (binary_bias is not None):
            self.bias.data = binary_bias
        # Resolve the per-operand magnitude bitwidths (one sign bit reserved):
        # an explicit (input, weight) tuple wins; otherwise the single int is
        # split according to keep_res / more_res.
        if isinstance(bitwidth, tuple):
            self.bw_input, self.bw_wght = bitwidth[0] - 1, bitwidth[1] - 1
        elif keep_res == "input":
            self.bw_input = self.bw_wght = bitwidth - 1
        elif keep_res == "output":
            if bitwidth % 2 == 0:
                self.bw_input = self.bw_wght = int(bitwidth / 2 - 1)
            elif more_res == "input":
                self.bw_input = int((bitwidth + 1) / 2 - 1)
                self.bw_wght = int((bitwidth - 1) / 2 - 1)
            elif more_res == "weight":
                self.bw_input = int((bitwidth - 1) / 2 - 1)
                self.bw_wght = int((bitwidth + 1) / 2 - 1)
            else:
                raise ValueError("more_res should be either 'input' or 'weight' when bitwidth is not a tuple and keep_res is 'output'.")
        else:
            raise ValueError("keep_res should be either 'input' or 'output' when bitwidth is not a tuple.")
        # Largest representable magnitudes for each operand.
        self.max_abs_input = 2 ** self.bw_input
        self.max_abs_wght = 2 ** self.bw_wght
        # Rounding mode applied to the log2 data ranges in forward().
        self.rounding = rounding
        # Shift amounts are derived lazily from the first batch seen.
        self.rshift_input = None
        self.rshift_wght = None
        self.rshift_output = None

    @autocast()
    def forward(self, input):
        """Run the fixed-point convolution: im2col, FxpLinearFunction matmul, fold."""
        with torch.no_grad():
            rounder = {"round": torch.round,
                       "floor": torch.floor,
                       "ceil": torch.ceil}.get(self.rounding)
            # Shift amounts are computed once, from log2 of the max magnitude.
            if self.rshift_input is None:
                in_range = input.abs().max().log2()
                if rounder is not None:
                    in_range = rounder(in_range)
                self.rshift_input = in_range - self.bw_input
            if self.rshift_wght is None:
                w_range = self.weight.abs().max().log2()
                if rounder is not None:
                    w_range = rounder(w_range)
                self.rshift_wght = w_range - self.bw_wght
            if self.rshift_output is None:
                self.rshift_output = 0 - self.rshift_input - self.rshift_wght
        # All tensors are NCHW.
        out_hw = conv2d_output_shape((input.size()[2], input.size()[3]), kernel_size=self.kernel_size, dilation=self.dilation, pad=self.padding, stride=self.stride)
        # im2col turns the convolution into a single matrix multiplication.
        patches = torch.nn.functional.unfold(input, self.kernel_size, self.dilation, self.padding, self.stride).transpose(1, 2)
        flat_input = patches.reshape(-1, patches.size()[-1])
        flat_weight = self.weight.view(self.weight.size()[0], -1)
        mm_out = FxpLinearFunction.apply(flat_input, flat_weight, None, self.rshift_input, self.rshift_wght, self.rshift_output, self.max_abs_input, self.max_abs_wght)
        folded = torch.nn.functional.fold(
            mm_out.reshape(input.size()[0], -1, mm_out.size()[-1]).transpose(1, 2),
            out_hw, (1, 1))
        if self.bias is None:
            return folded
        return folded + self.bias.view([1, self.bias.size()[0], 1, 1])
| 46.982558
| 189
| 0.556392
| 4,797
| 40,405
| 4.478841
| 0.055868
| 0.01899
| 0.017407
| 0.017873
| 0.872562
| 0.863533
| 0.844124
| 0.840214
| 0.833
| 0.821457
| 0
| 0.015854
| 0.3506
| 40,405
| 860
| 190
| 46.982558
| 0.802965
| 0.094642
| 0
| 0.809826
| 0
| 0.001585
| 0.028601
| 0
| 0
| 0
| 0
| 0
| 0.019017
| 1
| 0.025357
| false
| 0.00317
| 0.015848
| 0.001585
| 0.07607
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
355e75de025a37db47e9fd8a2338e68b060c39da
| 179
|
py
|
Python
|
pi1wire/_util.py
|
antonverburg/pi1wire
|
f9241cb8b12732f7a5a2f5310df8c2eaffb4f5d5
|
[
"MIT"
] | 1
|
2020-09-16T21:25:57.000Z
|
2020-09-16T21:25:57.000Z
|
pi1wire/_util.py
|
antonverburg/pi1wire
|
f9241cb8b12732f7a5a2f5310df8c2eaffb4f5d5
|
[
"MIT"
] | 1
|
2021-10-31T13:15:24.000Z
|
2021-11-27T12:50:21.000Z
|
pi1wire/_util.py
|
antonverburg/pi1wire
|
f9241cb8b12732f7a5a2f5310df8c2eaffb4f5d5
|
[
"MIT"
] | 1
|
2021-10-30T09:19:46.000Z
|
2021-10-30T09:19:46.000Z
|
def mac_to_dirname(mac_address: str) -> str:
    """Render a 1-Wire MAC address as its sysfs directory name (``XX-rest``)."""
    family, serial = mac_address[:2], mac_address[2:]
    return f'{family}-{serial}'
def dirname_to_mac(dirname: str) -> str:
    """Collapse a sysfs directory name back into a bare MAC address by removing every dash."""
    return ''.join(dirname.split('-'))
| 29.833333
| 55
| 0.648045
| 27
| 179
| 4.037037
| 0.37037
| 0.275229
| 0.220183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.150838
| 179
| 5
| 56
| 35.8
| 0.703947
| 0
| 0
| 0
| 0
| 0
| 0.03352
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
ea08e22d53a52cf586dbea8528a4283bcd9a5598
| 3,638
|
py
|
Python
|
daisychain/functional_tests/test_user_registration.py
|
daisychainme/daisychain
|
245d0041f1efd2d6cc110f60aebf2e2dee98bcdb
|
[
"MIT"
] | 5
|
2016-09-27T10:44:59.000Z
|
2022-03-29T08:16:44.000Z
|
daisychain/functional_tests/test_user_registration.py
|
daisychainme/daisychain
|
245d0041f1efd2d6cc110f60aebf2e2dee98bcdb
|
[
"MIT"
] | null | null | null |
daisychain/functional_tests/test_user_registration.py
|
daisychainme/daisychain
|
245d0041f1efd2d6cc110f60aebf2e2dee98bcdb
|
[
"MIT"
] | null | null | null |
from functional_tests.context import *
class UserRegisterTest(LiveServerTestCase):
    """Functional tests for the signup flow at /accounts/signup/."""

    def setUp(self):
        self.browser = webdriver.Chrome()
        self.browser.implicitly_wait(3)
        # Set reCaptcha to Testing-Mode
        os.environ['NORECAPTCHA_TESTING'] = 'True'

    def tearDown(self):
        self.browser.quit()
        # Set reCaptcha to Testing-Mode
        del os.environ['NORECAPTCHA_TESTING']

    def _submit_registration(self, username, email, password1, password2):
        """Open the signup page, fill the form with the given credentials,
        pass the test-mode reCaptcha, and submit.

        Extracted because both test methods previously duplicated this
        whole sequence line for line.
        """
        self.browser.get('%s%s' % (self.live_server_url, '/accounts/signup/'))
        # Enter name, email address and both password fields.
        self.browser.find_element_by_name("username").send_keys(username)
        self.browser.find_element_by_name("email").send_keys(email)
        self.browser.find_element_by_name("password1").send_keys(password1)
        self.browser.find_element_by_name("password2").send_keys(password2)
        # Reveal the hidden reCaptcha response field so it can be filled
        # with the testing-mode magic value.
        self.browser.execute_script(
            "document.getElementById('g-recaptcha-response')"
            ".style.display='block';")
        self.browser.find_element_by_name("g-recaptcha-response").send_keys('PASSED')
        self.browser.find_element_by_xpath('//button[@type="submit"]').click()
        self.browser.implicitly_wait(30)

    def test_user_registers_different_passwords(self):
        # User story: mismatched passwords must keep the user on the signup
        # page and show a validation error.
        self._submit_registration('H4x0r', '42@1337.org',
                                  'LuckyLuke1234', 'LuckyLuke123')
        self.assertIn('accounts/signup/', self.browser.current_url)
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('You must type the same password each time.', body.text)

    def test_user_registers(self):
        # User story: Lisa registers with valid data; she is redirected to the
        # e-mail confirmation page and exactly one confirmation mail is sent.
        self._submit_registration('Lisa', 'daisychain_lisa@testingmail.org',
                                  'hunter22', 'hunter22')
        self.assertIn('accounts/confirm-email/', self.browser.current_url)
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('We have sent an e-mail to you for verification',
                      body.text)
        # The email has been sent.
        self.assertEqual(len(mail.outbox), 1)
| 42.302326
| 79
| 0.668224
| 455
| 3,638
| 5.120879
| 0.281319
| 0.127468
| 0.103004
| 0.151073
| 0.777682
| 0.756223
| 0.756223
| 0.756223
| 0.756223
| 0.756223
| 0
| 0.010233
| 0.221001
| 3,638
| 85
| 80
| 42.8
| 0.811927
| 0.176471
| 0
| 0.509804
| 0
| 0
| 0.205843
| 0.081263
| 0
| 0
| 0
| 0.011765
| 0.098039
| 1
| 0.078431
| false
| 0.235294
| 0.019608
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ea10d2dd8181482d6dd5f7906e53e64a5f60f007
| 1,483
|
gyp
|
Python
|
deps/subversion/delta.gyp
|
yume-chan/node-svn
|
47f2eba70b55dcd15bda745b102668223a2b7f20
|
[
"MIT"
] | null | null | null |
deps/subversion/delta.gyp
|
yume-chan/node-svn
|
47f2eba70b55dcd15bda745b102668223a2b7f20
|
[
"MIT"
] | 5
|
2018-03-16T06:48:29.000Z
|
2018-04-17T09:47:15.000Z
|
deps/subversion/delta.gyp
|
yume-chan/node-svn
|
47f2eba70b55dcd15bda745b102668223a2b7f20
|
[
"MIT"
] | 4
|
2018-04-11T00:06:05.000Z
|
2019-10-25T01:34:40.000Z
|
# GYP build definition for the "libsvn_delta" static target: pulls shared
# build settings from common.gypi and lists the C sources of Subversion's
# bundled libsvn_delta library.
{
    "includes": [
        "./common.gypi"
    ],
    "targets": [
        {
            "sources": [
                "subversion/subversion/libsvn_delta/branch.c",
                "subversion/subversion/libsvn_delta/branch_compat.c",
                "subversion/subversion/libsvn_delta/branch_migrate.c",
                "subversion/subversion/libsvn_delta/branch_nested.c",
                "subversion/subversion/libsvn_delta/branch_repos.c",
                "subversion/subversion/libsvn_delta/cancel.c",
                "subversion/subversion/libsvn_delta/compat.c",
                "subversion/subversion/libsvn_delta/compose_delta.c",
                "subversion/subversion/libsvn_delta/debug_editor.c",
                "subversion/subversion/libsvn_delta/default_editor.c",
                "subversion/subversion/libsvn_delta/deprecated.c",
                "subversion/subversion/libsvn_delta/depth_filter_editor.c",
                "subversion/subversion/libsvn_delta/editor.c",
                "subversion/subversion/libsvn_delta/element.c",
                "subversion/subversion/libsvn_delta/path_driver.c",
                "subversion/subversion/libsvn_delta/svndiff.c",
                "subversion/subversion/libsvn_delta/text_delta.c",
                "subversion/subversion/libsvn_delta/version.c",
                "subversion/subversion/libsvn_delta/xdelta.c"
            ],
            "target_name": "libsvn_delta"
        }
    ]
}
| 46.34375
| 76
| 0.589346
| 134
| 1,483
| 6.283582
| 0.231343
| 0.261283
| 0.586698
| 0.699525
| 0.811164
| 0.501188
| 0
| 0
| 0
| 0
| 0
| 0
| 0.300742
| 1,483
| 31
| 77
| 47.83871
| 0.811958
| 0
| 0
| 0.064516
| 0
| 0
| 0.656336
| 0.616391
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea5f763c3c3fbf02da6888f901eb30cb4aaae7e7
| 2,394
|
py
|
Python
|
alterpdf/subset.py
|
JosephVC/Python_PDF_OCR
|
bdd44f635a173c1a453c65cf9891037f8349f865
|
[
"MIT"
] | 1
|
2020-03-29T21:05:19.000Z
|
2020-03-29T21:05:19.000Z
|
alterpdf/subset.py
|
JosephVC/OCR_project
|
bdd44f635a173c1a453c65cf9891037f8349f865
|
[
"MIT"
] | null | null | null |
alterpdf/subset.py
|
JosephVC/OCR_project
|
bdd44f635a173c1a453c65cf9891037f8349f865
|
[
"MIT"
] | null | null | null |
import sys
import os
from pdfrw import PdfReader, PdfWriter

# separate out selected pages
# RUNNING THIS SCRIPT: python subset.py sample_pdfs/meetingminutes.pdf 4-5
#
# BUGFIX: this file contained an unresolved merge conflict
# (<<<<<<< / ======= / >>>>>>> markers around two identical copies of the
# script), which is a Python syntax error. Resolved to the single copy.

# our first argument is the pdf we're looking to extract pages from
inpfn = sys.argv[1]
# our second argument is the range of pages we want
ranges = sys.argv[2:]
# if the user does not enter a range, exit with an error asking for one
# (was an `assert`, which is stripped under python -O)
if not ranges:
    sys.exit("Expected at least one range")
# This defines how you format the range, "x-y"
ranges = ([int(y) for y in x.split('-')] for x in ranges)
# Create the output file prefaced with the term "subset" and then the pdf name
outfn = 'subset.%s' % os.path.basename(inpfn)
pages = PdfReader(inpfn).pages
outdata = PdfWriter(outfn)
# run through the ranges specified
# remember to use a-b, x-y, c-d style; a single number n means the range n-n
for onerange in ranges:
    onerange = (onerange + onerange[-1:])[:2]
    for pagenum in range(onerange[0], onerange[1]+1):
        # FIXED: output began with one page less than specified
        outdata.addpage(pages[pagenum])
outdata.write()
# TODO: right now output files overwrite each other
| 30.692308
| 78
| 0.715121
| 380
| 2,394
| 4.5
| 0.286842
| 0.023392
| 0.030409
| 0.060819
| 0.974269
| 0.974269
| 0.974269
| 0.974269
| 0.974269
| 0.974269
| 0
| 0.02046
| 0.183375
| 2,394
| 77
| 79
| 31.090909
| 0.85422
| 0.518797
| 0
| 0.909091
| 0
| 0
| 0.065719
| 0
| 0
| 0
| 0
| 0.012987
| 0.060606
| 0
| null | null | 0
| 0.181818
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ea876938cc30c42c259377f734814ce9897a9805
| 24,096
|
py
|
Python
|
TEMPy/Consensus.py
|
OniDaito/ChimeraXTempy
|
a32ef6c54a403580f3a530ab36d91e475bf4b2dc
|
[
"MIT"
] | 2
|
2020-04-03T03:38:08.000Z
|
2020-06-21T02:31:38.000Z
|
TEMPy/Consensus.py
|
OniDaito/ChimeraXTempy
|
a32ef6c54a403580f3a530ab36d91e475bf4b2dc
|
[
"MIT"
] | 16
|
2017-06-16T20:06:14.000Z
|
2017-07-31T17:32:32.000Z
|
TEMPy/Consensus.py
|
OniDaito/ChimeraXTempy
|
a32ef6c54a403580f3a530ab36d91e475bf4b2dc
|
[
"MIT"
] | 1
|
2020-06-21T02:31:44.000Z
|
2020-06-21T02:31:44.000Z
|
#===============================================================================
# This file is part of TEMPy.
#
# TEMPy is a software designed to help the user in the manipulation
# and analyses of macromolecular assemblies using 3D electron microscopy maps.
#
# Copyright 2015 Birkbeck College University of London.
#
# Authors: Maya Topf, Daven Vasishtan, Arun Prasad Pandurangan,
# Irene Farabella, Agnel-Praveen Joseph, Harpal Sahota
#
# This software is made available under GPL V3 license
# http://www.gnu.org/licenses/gpl-3.0.html
#
#
# Please cite your use of TEMPy in published work:
#
# Farabella, I., Vasishtan, D., Joseph, A.P., Pandurangan, A.P., Sahota, H. & Topf, M. (2015). J. Appl. Cryst. 48.
#
#===============================================================================
from TEMPy.StructureBlurrer import StructureBlurrer
from TEMPy.ScoringFunctions import ScoringFunctions
from TEMPy.Cluster import Cluster
from numpy import zeros,mean,median,asarray
from scipy.stats import mode
import sys
from collections import defaultdict
class Consensus:
"""A class to clustering an ensemble of structure instance"""
def __init__(self):
    """Initialize the Consensus helper; the class keeps no instance state."""
    pass
def _makedict_value(self,rankCCC):
"""
private function used in Consensus Module.
"""
#print rankCCC
rank_dict={}
for r in rankCCC:
rank_dict[r[0]]=r[2]
return rank_dict
def _makedict(self,rank_score):
"""
private function used in Consensus Module.
"""
namerank_score=[mod[0] for mod in rank_score]
d_rank={i:j for i,j in enumerate(namerank_score,start=1)}
return d_rank
def _makedict_list(self,list_score):
"""
private function used in Consensus Module.
"""
#print enumerate(rankCCC)
d_rank={i:j for i,j in list_score}
return d_rank
def _printdict(self,dict_score):
"""
private function used in Consensus Module.
"""
for k,v in list(dict_score.items()):
print(k,v)
def _modes(self,values):
"""
private function used in Consensus Module.
"""
count = defaultdict(int)
for v in values:
count[v] +=1
best = max(count.values())
print([k for k,v in list(count.items()) if v == best])
def _mode_here(self,arr):
"""
private function used in Consensus Module.
"""
m = max([arr.count(a) for a in arr])
print([x for x in arr if arr.count(x) == m][0] if m>1 else None)
def vote_mode(self,ensemble_list,score_list,res_target_map,sigma_coeff,number_top_mod=0,write=False,targetMap=False):
"""
Mode consensus scoring calculation between multiple "fits" using a user defined set of scores.
Arguments:
*ensemble_list*
Input list of Structure Instances.
*score_list*
Input list of scoring function to use.
See ScoringFunctions class for a list of the available Scoring Function.
E.g. set score='CCC' to use the Cross-correlation coefficient.
Score option are:
i 'CCC' - Cross-correlation coefficient;
ii 'LAP' - Laplacian-filtered cross-correlation coefficient: useful for maps with resolutions worse than 10-15 A;
iii 'MI' - Mutual information score: a good and robust score but relatively slow to calculate;
iv 'ENV' - Envelope score: the fastest score to calculate due to binarisation of the map.
v-vii 'NV','NV_Sobel','NV_Laplace'- Normal vector score: a vector-based surface superimposition score with or without Sobel/Laplace filter.
viii 'CD' - Chamfer Distance: a score used in computer vision algorithms as a fast similarity metric
*res_target_map*
the resolution, in Angstroms, of the target Map.
*sigma_coeff*
the sigma value (multiplied by the resolution) that controls the width of the Gaussian.
Default values is 0.356.
Other values used :
0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
0.225R which makes the Fourier transform of the distribution fall to 1/e of its maximum value at wavenumber 1/resolution, the default in Chimera (Petterson et al, 2004)
0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, an option in Chimera (Petterson et al, 2004);
0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
1R where the sigma value simply equal to the resolution, as used by NMFF (Tama et al, 2004).
*number_top_mod*
Number of Fits to cluster. Default is all.
*write*
True will write out a file that contains the list of the structure instances representing different fits scored and clustered.
note the lrms column is the Calpha RMSD of each fit from the first fit in its class
*targetMap*
Target Map Instance.
"""
cluster=Cluster()
list_dict=[]
if targetMap==False:
#targetMap = self.protMap(prot, min(resolution/4., 3.5), resolution)
print("WARNING:Need target map")
sys.exit()
score_select=[]
for score in score_list:
#check if score chosen are correct
if score not in ['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']:
print('Incorrect Scoring Function: %s' % score)
print('Please select from one of the following scoring functions: %s' % ', '.join(['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']))
sys.exit()
if score not in score_select:
score_select.append(score)
else:
print('Chose the %s twice' % score)
sys.exit()
for score in score_list:
print("******",score)
if score=='CCC':
rankCCC=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictCCC=Consensus()._makedict(rankCCC)
list_dict.append(dictCCC)
Consensus()._printdict(dictCCC)
elif score=='LAP':
rankLAP=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictLAP=Consensus()._makedict(rankLAP)
list_dict.append(dictLAP)
Consensus()._printdict(dictLAP)
elif score=='MI':
rankMI=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictMI=Consensus()._makedict(rankMI)
list_dict.append(dictMI)
Consensus()._printdict(dictMI)
elif score=='NV':
rankNV=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictNV=Consensus()._makedict(rankNV)
list_dict.append(dictNV)
Consensus()._printdict(dictNV)
elif score=='NV_Sobel':
rankNVS=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictNVS=Consensus()._makedict(rankNVS)
list_dict.append(dictNVS)
Consensus()._printdict(dictNVS)
elif score=='NV_Laplace':
rankNVL=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictNVL=Consensus()._makedict(rankNVL)
list_dict.append(dictNVL)
Consensus()._printdict(dictNVL)
elif score=='ENV':
rankENV=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictENV=Consensus()._makedict(rankENV)
list_dict.append(dictENV)
Consensus()._printdict(dictENV)
if score=='CD':
rankCD=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictCD=Consensus()._makedict(rankCD)
list_dict.append(dictCD)
Consensus()._printdict(dictCD)
dict_count={}
mxcinsensus = zeros(shape=(7,number_top_mod))
for k,v in list(list_dict[0].items()):
dict_count[v]=[]
for k in dict_count:
for num in range(len(list_dict)):
for k2,v2 in list(list_dict[num].items()):
if k == v2:
dict_count[k].append(k2)
dict_out={}
for k,v in list(dict_count.items()):
median_list=median(v)
m = max([v.count(a) for a in v])
if m>1:
mode_list=[x for x in v if v.count(x) == m][0]
dict_out[k]=[median_list,mode_list]
else:
pass
mode_list=max(set(v), key=v.count)
sorted_dict = sorted(list(dict_out.items()), key=lambda x: x[1])
print("**************")
print("Consensus rank")
for fit in sorted_dict:
print(fit[1],fit[0])
return sorted_dict
def _borda_score(self,list_rank,candidate,voters):
"""
private function used in vote function.
It calculates the Borda count is a single-winner election method in which voters rank candidates in order of preference.
"""
score=0
for r in list_rank:
score+=(candidate-r)*voters
return score
def vote(self, ensemble_list, score_list, res_target_map, sigma_coeff, number_top_mod=0, write=False, targetMap=False):
    """
    Borda consensus scoring calculation between multiple "fits" using a
    user-defined set of scores.

    The Borda count is a single-winner election method in which voters
    rank candidates in order of preference.

    Arguments:
    *ensemble_list*
        Input list of Structure Instances.
    *score_list*
        Input list of scoring functions to use. Options:
        'CCC' cross-correlation coefficient; 'LAP' Laplacian-filtered CCC
        (useful for maps with resolutions worse than 10-15 A); 'MI' mutual
        information; 'ENV' envelope score; 'NV', 'NV_Sobel', 'NV_Laplace'
        normal-vector scores; 'CD' chamfer distance.
    *res_target_map*
        the resolution, in Angstroms, of the target Map.
    *sigma_coeff*
        the sigma value (multiplied by the resolution) that controls the
        width of the Gaussian. The default value is 0.356.
    *number_top_mod*
        Number of fits to cluster. Default is all.
    *write*
        currently unused; kept for backward interface compatibility.
    *targetMap*
        Target Map instance (required).

    Returns the list of (fit, [borda_score, per_score_ranks]) pairs
    sorted by decreasing Borda score.
    """
    cluster = Cluster()
    list_dict = []
    candidate = len(ensemble_list)
    voters = len(score_list)
    if targetMap == False:
        # A target map is mandatory; there is no sensible default.
        print("WARNING:Need target map")
        sys.exit()
    # Validate the requested scores before doing any expensive ranking.
    valid_scores = ['CCC', 'LAP', 'MI', 'NV', 'NV_Sobel', 'NV_Laplace', 'ENV', 'CD']
    score_select = []
    for score in score_list:
        if score not in valid_scores:
            print('Incorrect Scoring Function: %s' % score)
            print('Please select from one of the following scoring functions: %s' % ', '.join(valid_scores))
            sys.exit()
        if score not in score_select:
            score_select.append(score)
        else:
            print('Chose the %s twice' % score)
            sys.exit()
    # Rank the ensemble once per score. Every score shares the same code
    # path (the original duplicated this block once per score name and
    # used a stray `if` instead of `elif` for 'CD').
    for score in score_list:
        print("******", score)
        rank = cluster.rank_fit_ensemble(ensemble_list, score, res_target_map, sigma_coeff, number_top_mod=number_top_mod, targetMap=targetMap.copy())
        rank_dict = Consensus()._makedict(rank)
        list_dict.append(rank_dict)
        Consensus()._printdict(rank_dict)
        if score == 'NV':
            # Extra diagnostic output kept from the original NV branch.
            for i in rank:
                print(i[0], i[2])
    # For every fit (value side of the first ranking dict) collect the
    # rank position it obtained under each scoring function.
    dict_count = {}
    for k, v in list(list_dict[0].items()):
        dict_count[v] = []
    for k in dict_count:
        for num in range(len(list_dict)):
            for k2, v2 in list(list_dict[num].items()):
                if k == v2:
                    dict_count[k].append(k2)
    # Combine the per-score ranks into a single Borda score per fit.
    dict_out = {}
    for k, v in list(dict_count.items()):
        borda_score = Consensus()._borda_score(v, candidate, voters)
        dict_out[k] = [borda_score, v]
    sorted_dict = sorted(list(dict_out.items()), key=lambda x: x[1][0], reverse=True)
    print("**************")
    print("Consensus rank")
    line = ''
    line += "Borda_score\t"
    for score in score_list:
        line += '%s\t' % score
    line += "Fit\n"
    count = 0
    for fit in sorted_dict:
        count += 1
        line += '%s\t' % count
        line += '%s\t' % fit[1][0]
        for s in fit[1][1]:
            line += '%s\t' % s
        line += '%s\n' % fit[0]
    print(line)
    return sorted_dict
#need to make it more elegant this come from private scripting.
def vote_list(self, score_lists):
    """
    Borda consensus scoring calculation between multiple "fits" using a
    user-defined set of scores.

    The Borda count is a single-winner election method in which voters
    rank candidates in order of preference.

    Arguments:
    *score_lists*
        List of lists; each inner list holds Structure Instances
        associated with a score (one inner list per scoring function).

    Returns the list of (fit, [borda_score, per_score_ranks]) pairs
    sorted by decreasing Borda score.
    """
    dict_count = {}
    list_dict = []
    candidate = []
    voters = len(score_lists)
    for i in score_lists:
        candidate.append(len(i))
    for list_score in score_lists:
        dictScore = Consensus()._makedict(list_score)
        list_dict.append(dictScore)
    # For every fit collect its rank position under each score list.
    for k, v in list(list_dict[0].items()):
        dict_count[v] = []
    for k in dict_count:
        for num in range(len(list_dict)):
            for k2, v2 in list(list_dict[num].items()):
                if k == v2:
                    dict_count[k].append(k2)
    dict_out = {}
    for k, v in list(dict_count.items()):
        # NOTE(review): assumes all inner lists have the same length, so
        # candidate[0] is representative — confirm with callers.
        borda_score = Consensus()._borda_score(v, candidate[0], voters)
        # BUG FIX: also store the per-score ranks; the report loop below
        # reads fit[1][1] and previously raised IndexError because only
        # [borda_score] was stored (vote() stores [borda_score, v]).
        dict_out[k] = [borda_score, v]
    sorted_dict = sorted(list(dict_out.items()), key=lambda x: x[1][0], reverse=True)
    print("**************")
    print("Consensus rank")
    line = ''
    line += "Borda_score\t"
    count = 0
    for score in score_lists:
        count += 1
        line += '%s\t' % count
    line += "Fit\n"
    count = 0
    for fit in sorted_dict:
        count += 1
        line += '%s\t' % count
        b = fit[1][0]
        line += '%s\t' % b
        for s in fit[1][1]:
            line += '%s\t' % s
        m = fit[0]
        line += '%s\n' % m
    print(line)
    return sorted_dict
# def vote_list(self,score_lists):
#
# dict_count={}
# list_dict=[]
# for list_score in score_lists:
# dictScore=Consensus()._makedict_list(list_score)
# list_dict.append(dictScore)
# for k,v in list_dict[0].items():
# dict_count[v]=[]
# for k in dict_count:
# for num in range(len(list_dict)):
# for k2,v2 in list_dict[num].items():
# if k == v2:
# dict_count[k].append(k2)
# dict_out={}
# for k,v in dict_count.items():
# # print k
# # print sum(v)
# #print mean(v)
# #print median(v)
# most_frequent=mode(v)[0][0]
# dict_out[k]=most_frequent
# sorted_dict = sorted(dict_out.items(), key=lambda x: x[1])
# print "**************"
# print "Consensus rank"
# for fit in sorted_dict:
# print fit[1],fit[0]
# return sorted_dict
#
| 46.338462
| 196
| 0.539343
| 2,775
| 24,096
| 4.535856
| 0.138018
| 0.026694
| 0.036228
| 0.03019
| 0.835068
| 0.828077
| 0.811472
| 0.79312
| 0.78454
| 0.78454
| 0
| 0.013474
| 0.365496
| 24,096
| 520
| 197
| 46.338462
| 0.809798
| 0.381059
| 0
| 0.760456
| 0
| 0
| 0.048333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041825
| false
| 0.007605
| 0.026616
| 0
| 0.098859
| 0.152091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57ec4f2f991164c24b8987be5a9e63929a02d633
| 8,391
|
py
|
Python
|
tests/test_sync_client.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | null | null | null |
tests/test_sync_client.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | 1
|
2021-01-18T09:38:53.000Z
|
2021-01-18T09:38:53.000Z
|
tests/test_sync_client.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | 1
|
2021-01-15T05:15:08.000Z
|
2021-01-15T05:15:08.000Z
|
from unittest.mock import Mock
import pytest
from ice3x.clients.sync import IceCubedSyncClient
from ice3x.exceptions import UnauthorisedResourceException
class Response:
    """Minimal stand-in for an HTTP response object used by the tests."""

    def raise_for_status(self) -> None:
        """No-op: this fake response never represents an HTTP error."""
        return None

    def json(self):
        """Return an empty JSON payload."""
        return {}
@pytest.fixture
def client():
    """Provides an authorized client as a fixture"""
    # Credentials are dummies: requests.Session.request is mocked in every
    # test, so no real authentication happens.
    return IceCubedSyncClient("api_key", "secret")
@pytest.fixture
def uclient():
    """Provides an unauthorized client as a fixture"""
    # No credentials: used to verify that private endpoints raise
    # UnauthorisedResourceException.
    return IceCubedSyncClient()
@pytest.fixture
def expected_data():
    """Provides the payload that every mocked endpoint returns."""
    return {}
@pytest.fixture
def expected_response(expected_data):
    """Provides a mocked response whose .json() yields *expected_data*."""
    mocked = Mock()
    mocked.json.return_value = expected_data
    return mocked
# Each test below patches requests.Session.request so no real HTTP traffic
# occurs; the client is expected to return the mocked payload unchanged.
def test_get_public_trade_info(
    mocker, expected_response, expected_data, client
) -> None:
    """Test the get_public_trade_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_public_trade_info(trade_id=1)
    assert actual_data == expected_data


def test_get_public_trade_list(
    mocker, expected_response, expected_data, client
) -> None:
    """Test the get_public_trade_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_public_trade_list()
    assert actual_data == expected_data


def test_get_market_depth(mocker, expected_response, expected_data, client) -> None:
    """Test the get_market_depth of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_market_depth()
    assert actual_data == expected_data


def test_get_pair_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_pair_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_pair_info(pair_id=1)
    assert actual_data == expected_data


def test_get_pair_list(mocker, expected_response, expected_data, client) -> None:
    """Test the get_pair_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_pair_list(pair_id=1)
    assert actual_data == expected_data


def test_get_currency_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_currency_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_currency_info(currency_id=1)
    assert actual_data == expected_data


def test_get_currency_list(mocker, expected_response, expected_data, client) -> None:
    """Test the get_currency_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_currency_list(currency_id=1)
    assert actual_data == expected_data


def test_get_orderbook_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_orderbook_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_orderbook_info(pair_id=1)
    assert actual_data == expected_data
def test_get_market_depth_full(
    mocker, expected_response, expected_data, client
) -> None:
    """Test the get_market_depth_full of the sync client"""
    # docstring fixed: it previously claimed to test get_orderbook_info
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_market_depth_full()
    assert actual_data == expected_data
def test_get_market_depth_bt_cav(
    mocker, expected_response, expected_data, client
) -> None:
    """Test the get_market_depth_bt_cav of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_market_depth_bt_cav()
    assert actual_data == expected_data


def test_get_invoice_list(mocker, expected_response, expected_data, client) -> None:
    """Test the get_invoice_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_invoice_list()
    assert actual_data == expected_data


def test_get_invoice_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_invoice_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_invoice_info(invoice_id=1)
    assert actual_data == expected_data


def test_get_invoice_pdf(mocker, expected_response, expected_data, client) -> None:
    """Test the get_invoice_pdf of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_invoice_pdf(invoice_id=1)
    assert actual_data == expected_data


def test_cancel_order(mocker, expected_response, expected_data, client) -> None:
    """Test the cancel_order of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.cancel_order(order_id=1)
    assert actual_data == expected_data


def test_create_order(mocker, expected_response, expected_data, client) -> None:
    """Test the create_order of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.create_order(pair_id=1, amount=100, kind="buy", price=100)
    assert actual_data == expected_data


def test_get_order_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_order_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_order_info(order_id=1)
    assert actual_data == expected_data


def test_get_order_list(mocker, expected_response, expected_data, client) -> None:
    """Test the get_order_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_order_list()
    assert actual_data == expected_data


def test_get_transaction_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_transaction_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_transaction_info(transaction_id=1)
    assert actual_data == expected_data


def test_get_transaction_list(mocker, expected_response, expected_data, client) -> None:
    """Test the get_transaction_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_transaction_list()
    assert actual_data == expected_data


def test_get_trade_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_trade_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_trade_info(trade_id=1)
    assert actual_data == expected_data


def test_get_trade_list(mocker, expected_response, expected_data, client) -> None:
    """Test the get_trade_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_trade_list()
    assert actual_data == expected_data


def test_get_balance_list(mocker, expected_response, expected_data, client) -> None:
    """Test the get_balance_list of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_balance_list()
    assert actual_data == expected_data


def test_get_balance_info(mocker, expected_response, expected_data, client) -> None:
    """Test the get_balance_info of the sync client"""
    mocker.patch("requests.Session.request", return_value=expected_response)
    actual_data = client.get_balance_info(currency_id=1)
    assert actual_data == expected_data


def test_unauthorised_access(mocker, expected_response, uclient) -> None:
    """Test that the requires_authentication throws an error when accessing a resource without authentication"""
    # The mock is still installed so the failure must come from the client's
    # own authentication guard, not from a network error.
    with pytest.raises(UnauthorisedResourceException):
        mocker.patch("requests.Session.request", return_value=expected_response)
        uclient.get_balance_info(currency_id=1)
| 31.904943
| 112
| 0.762126
| 1,129
| 8,391
| 5.343667
| 0.077059
| 0.132604
| 0.078734
| 0.111387
| 0.868059
| 0.859771
| 0.842367
| 0.842367
| 0.842367
| 0.782529
| 0
| 0.003202
| 0.143964
| 8,391
| 262
| 113
| 32.026718
| 0.836698
| 0.151233
| 0
| 0.480315
| 0
| 0
| 0.084741
| 0.082451
| 0
| 0
| 0
| 0
| 0.181102
| 1
| 0.23622
| false
| 0.007874
| 0.031496
| 0.015748
| 0.314961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
17b899f7c43ee5f859fa68c4e4bb9ea581619564
| 9,240
|
py
|
Python
|
plot_grs_test.py
|
CosmoLike/WFIRST_forecasts
|
aa65774dc1870450723b0a13449681e37d0f979c
|
[
"MIT"
] | 1
|
2019-08-21T00:36:40.000Z
|
2019-08-21T00:36:40.000Z
|
plot_grs_test.py
|
CosmoLike/WFIRST_forecasts
|
aa65774dc1870450723b0a13449681e37d0f979c
|
[
"MIT"
] | null | null | null |
plot_grs_test.py
|
CosmoLike/WFIRST_forecasts
|
aa65774dc1870450723b0a13449681e37d0f979c
|
[
"MIT"
] | null | null | null |
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as LA
# add location of data vector file for plotting
datavfile1 = "GRS_data_vector"
datavfile2 = "GRS_pred_vector"
# Read the prediction file once and slice it, instead of re-parsing the
# same file four times with np.genfromtxt.
pred = np.genfromtxt(datavfile2)
d1 = np.genfromtxt(datavfile1)[:, 3]
d2 = pred[:, 3]
zbin = pred[:, 0]
kvalues = pred[:, 1]
muvalues = pred[:, 2]
# define plot ranges
Nk = 100
Nmu = 10
Nz = 7
plotmax = Nk * Nmu
plotfile = "plots/GRS_test.png"
# variance of the GRS data points
varfile = "./GRS_variance"
var = np.sqrt(np.genfromtxt(varfile)[:, 3])
ndata = d1.shape[0]
# Prints converted from Python-2 statements to the parenthesized form,
# which behaves identically on Python 2 and runs on Python 3.
print("chi2 calculated")
chi = 0.0
for i in range(0, ndata):
    # NOTE(review): var holds sqrt(variance), so this divides the squared
    # residual by sigma rather than sigma^2 — confirm that is intended.
    chi += (d1[i] - d2[i]) * (d1[i] - d2[i]) / var[i]
print("GRS: Delta chi2 = %f" % (chi))
print(kvalues[0:Nk*Nmu:Nmu])
# Four-panel P(k) figure. Within the first z-bin, index stride Nmu walks k
# at fixed mu, so slice [m:Nk*Nmu:Nmu] is the curve for mu-index m.
plt.figure(figsize=(6,6), dpi=1000)
fs = 18
# Panel 1: log-log axes, curves for mu indices 0..9 of the first z-bin
# (colors repeat after 7, so the last three reuse r/b/g).
plt.subplot(2,2,1)
plt.yscale('log')
plt.xscale('log')
plt.ylim(1.0e+01,2.5e+03)
plt.xlim(0.002,0.33)
#plt.title(r'$\xi_+$')
plt.ylabel(r'$P(k)$', fontsize = fs)
# Error bars are drawn only for the mu=0 slice; curves are overplotted.
plt.errorbar(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],var[0:Nk*Nmu:Nmu],marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.25)
plt.plot(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],marker='o', color='r',linestyle = '-',markersize = 1)
plt.plot(kvalues[1:Nk*Nmu:Nmu],d1[1:Nk*Nmu:Nmu],marker='o', color='b',linestyle = '-',markersize = 1)
plt.plot(kvalues[2:Nk*Nmu:Nmu],d1[2:Nk*Nmu:Nmu],marker='o', color='g',linestyle = '-',markersize = 1)
plt.plot(kvalues[3:Nk*Nmu:Nmu],d1[3:Nk*Nmu:Nmu],marker='o', color='orange',linestyle = '-',markersize = 1)
plt.plot(kvalues[4:Nk*Nmu:Nmu],d1[4:Nk*Nmu:Nmu],marker='o', color='brown',linestyle = '-',markersize = 1)
plt.plot(kvalues[5:Nk*Nmu:Nmu],d1[5:Nk*Nmu:Nmu],marker='o', color='cyan',linestyle = '-',markersize = 1)
plt.plot(kvalues[6:Nk*Nmu:Nmu],d1[6:Nk*Nmu:Nmu],marker='o', color='yellow',linestyle = '-',markersize = 1)
plt.plot(kvalues[7:Nk*Nmu:Nmu],d1[7:Nk*Nmu:Nmu],marker='o', color='r',linestyle = '-',markersize = 1)
plt.plot(kvalues[8:Nk*Nmu:Nmu],d1[8:Nk*Nmu:Nmu],marker='o', color='b',linestyle = '-',markersize = 1)
plt.plot(kvalues[9:Nk*Nmu:Nmu],d1[9:Nk*Nmu:Nmu],marker='o', color='g',linestyle = '-',markersize = 1)
# Panel 2: same ten mu curves on linear axes.
plt.subplot(2,2,2)
#plt.yscale('log')
plt.ylim(3.0e+01,2.5e+03)
plt.xlim(0.002,0.33)
#plt.title(r'$\xi_+$')
plt.ylabel(r'$P(k)$', fontsize = fs)
plt.errorbar(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],var[0:Nk*Nmu:Nmu],marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.25)
plt.plot(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],marker='o', color='r',linestyle = '-',markersize = 1.)
plt.plot(kvalues[1:Nk*Nmu:Nmu],d1[1:Nk*Nmu:Nmu],marker='o', color='b',linestyle = '-',markersize = 1.)
plt.plot(kvalues[2:Nk*Nmu:Nmu],d1[2:Nk*Nmu:Nmu],marker='o', color='g',linestyle = '-',markersize = 1)
plt.plot(kvalues[3:Nk*Nmu:Nmu],d1[3:Nk*Nmu:Nmu],marker='o', color='orange',linestyle = '-',markersize = 1)
plt.plot(kvalues[4:Nk*Nmu:Nmu],d1[4:Nk*Nmu:Nmu],marker='o', color='brown',linestyle = '-',markersize = 1)
plt.plot(kvalues[5:Nk*Nmu:Nmu],d1[5:Nk*Nmu:Nmu],marker='o', color='cyan',linestyle = '-',markersize = 1)
plt.plot(kvalues[6:Nk*Nmu:Nmu],d1[6:Nk*Nmu:Nmu],marker='o', color='yellow',linestyle = '-',markersize = 1)
plt.plot(kvalues[7:Nk*Nmu:Nmu],d1[7:Nk*Nmu:Nmu],marker='o', color='r',linestyle = '-',markersize = 1)
plt.plot(kvalues[8:Nk*Nmu:Nmu],d1[8:Nk*Nmu:Nmu],marker='o', color='b',linestyle = '-',markersize = 1)
plt.plot(kvalues[9:Nk*Nmu:Nmu],d1[9:Nk*Nmu:Nmu],marker='o', color='g',linestyle = '-',markersize = 1)
# Panel 3: log-log axes again, but only mu indices 0..6.
plt.subplot(2,2,3)
plt.yscale('log')
plt.xscale('log')
plt.ylim(1.0e+01,2.5e+03)
plt.xlim(0.002,0.33)
#plt.title(r'$\xi_+$')
plt.ylabel(r'$P(k)$', fontsize = fs)
plt.errorbar(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],var[0:Nk*Nmu:Nmu],marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.25)
plt.plot(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],marker='o', color='r',linestyle = '-',markersize = 1)
plt.plot(kvalues[1:Nk*Nmu:Nmu],d1[1:Nk*Nmu:Nmu],marker='o', color='b',linestyle = '-',markersize = 1)
plt.plot(kvalues[2:Nk*Nmu:Nmu],d1[2:Nk*Nmu:Nmu],marker='o', color='g',linestyle = '-',markersize = 1)
plt.plot(kvalues[3:Nk*Nmu:Nmu],d1[3:Nk*Nmu:Nmu],marker='o', color='orange',linestyle = '-',markersize = 1)
plt.plot(kvalues[4:Nk*Nmu:Nmu],d1[4:Nk*Nmu:Nmu],marker='o', color='brown',linestyle = '-',markersize = 1)
plt.plot(kvalues[5:Nk*Nmu:Nmu],d1[5:Nk*Nmu:Nmu],marker='o', color='cyan',linestyle = '-',markersize = 1)
plt.plot(kvalues[6:Nk*Nmu:Nmu],d1[6:Nk*Nmu:Nmu],marker='o', color='yellow',linestyle = '-',markersize = 1)
# Panel 4: linear axes; the first curve is mu=0 of z-bin 0, the remaining
# curves step through successive z-bins (contiguous blocks of Nk*Nmu rows).
plt.subplot(2,2,4)
#plt.yscale('log')
plt.ylim(3.0e+01,2.5e+03)
plt.xlim(0.002,0.33)
#plt.title(r'$\xi_+$')
plt.ylabel(r'$P(k)$', fontsize = fs)
plt.errorbar(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],var[0:Nk*Nmu:Nmu],marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.25)
plt.plot(kvalues[0:Nk*Nmu:Nmu],d1[0:Nk*Nmu:Nmu],marker='o', color='r',linestyle = '-',markersize = 1.)
plt.plot(kvalues[1*Nk*Nmu:2*Nk*Nmu:Nmu],d1[1*Nk*Nmu:2*Nk*Nmu:Nmu],marker='o', color='b',linestyle = '-',markersize = 1.)
plt.plot(kvalues[2*Nk*Nmu:3*Nk*Nmu:Nmu],d1[2*Nk*Nmu:3*Nk*Nmu:Nmu],marker='o', color='g',linestyle = '-',markersize = 1)
plt.plot(kvalues[3*Nk*Nmu:4*Nk*Nmu:Nmu],d1[3*Nk*Nmu:4*Nk*Nmu:Nmu],marker='o', color='orange',linestyle = '-',markersize = 1)
plt.plot(kvalues[4*Nk*Nmu:5*Nk*Nmu:Nmu],d1[4*Nk*Nmu:5*Nk*Nmu:Nmu],marker='o', color='brown',linestyle = '-',markersize = 1)
plt.plot(kvalues[5*Nk*Nmu:6*Nk*Nmu:Nmu],d1[5*Nk*Nmu:6*Nk*Nmu:Nmu],marker='o', color='cyan',linestyle = '-',markersize = 1)
plt.plot(kvalues[6*Nk*Nmu:7*Nk*Nmu:Nmu],d1[6*Nk*Nmu:7*Nk*Nmu:Nmu],marker='o', color='yellow',linestyle = '-',markersize = 1)
plt.savefig(plotfile,dpi=1000)
# plt.figure(figsize=(8,8), dpi=400)
# fs = 18
# plt.subplot(4,2,1)
# plt.yscale('log')
# plt.ylim(2.e-7,1.2e-4)
# plt.xlim(0.001,0.3)
# #plt.title(r'$\xi_+$')
# plt.ylabel(r'$\xi_+$', fontsize = fs)
# plt.errorbar(ind,d1,s,marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.25)
# plt.plot(kvalues,d1,marker='o', color='r',linestyle = '',markersize = 1.5)
# plt.subplot(4,2,2)
# plt.ylim(-0.25,0.25)
# plt.plot([0,1000],[0,0],linestyle ='--',color='k')
# plt.xlim(0,nxip-1)
# plt.ylabel(r'(d2-d1)/d2', fontsize = fs)
# plt.errorbar(ind,d1*0,s/d1,marker='o', color='k',linestyle = '',markersize = 0.0,alpha = 0.1)
# plt.plot(ind[ind0],(d2[ind0]-d1[ind0])/d2[ind0],marker='x', color='k',linestyle = '',markersize = 1.0)
# plt.plot(ind[ind1],(d2[ind1]-d1[ind1])/d2[ind1],marker='o', color='r',linestyle = '',markersize = 1.0)
# plt.subplot(4,2,3)
# plt.yscale('log')
# plt.ylim(2.e-7,6.e-5)
# plt.xlim(nxip,nxip+nxim-1)
# #plt.title(r'$\xi_-$')
# plt.ylabel(r'$\xi_-$', fontsize = fs)
# plt.errorbar(ind,d1,s,marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.25)
# plt.plot(ind,d1,marker='o', color='r',linestyle = '',markersize = 1.5)
# plt.subplot(4,2,4)
# plt.ylim(-0.25,0.25)
# plt.plot([0,1000],[0,0],linestyle ='--',color='k')
# plt.xlim(nxip,nxip+nxim-1)
# plt.ylabel(r'(d2-d1)/d2', fontsize = fs)
# plt.errorbar(ind,d1*0,s/d1,marker='o', color='k',linestyle = '',markersize = 0.0,alpha = 0.1)
# plt.plot(ind[ind0],(d2[ind0]-d1[ind0])/d2[ind0],marker='x', color='k',linestyle = '',markersize = 1.0)
# plt.plot(ind[ind1],(d2[ind1]-d1[ind1])/d2[ind1],marker='o', color='r',linestyle = '',markersize = 1.0)
# plt.subplot(4,2,5)
# plt.yscale('log')
# plt.ylim(2.e-6,2.5e-3)
# plt.xlim(nxip+nxim,nxip+nxim+nggl-1)
# #plt.title(r'$\gamma_t$')
# plt.ylabel(r'$\gamma_t$', fontsize = fs)
# plt.errorbar(ind,d1,s,marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.2)
# plt.plot(ind,d1,marker='o', color='r',linestyle = '',markersize = 1.5)
# #plt.plot(ind,d3,linestyle = '-')
# plt.subplot(4,2,6)
# plt.ylim(-0.25,0.25)
# plt.plot([0,1000],[0,0],linestyle ='--',color='k')
# plt.xlim(nxip+nxim,nxip+nxim+nggl-1)
# #plt.title(r'$\gamma_t$')
# plt.ylabel(r'(d2-d1)/d2', fontsize = fs)
# plt.errorbar(ind,d1*0,s/d1,marker='o', color='k',linestyle = '',markersize = 0.0,alpha = 0.1)
# plt.plot(ind[ind0],(d2[ind0]-d1[ind0])/d2[ind0],marker='x', color='k',linestyle = '',markersize = 1.0)
# plt.plot(ind[ind1],(d2[ind1]-d1[ind1])/d2[ind1],marker='o', color='r',linestyle = '',markersize = 1.0)
# plt.subplot(4,2,7)
# plt.yscale('log')
# plt.ylim(1.e-4,0.6)
# plt.xlim(nxip+nxim+nggl,ndata)
# #plt.title(r'$w$')
# plt.ylabel(r'$w$', fontsize = fs)
# plt.xlabel(r'bin number', fontsize = fs)
# plt.errorbar(ind,d1,s,marker='o', color='k',linestyle = '',markersize = 0.5,alpha = 0.4)
# plt.plot(ind,d1,marker='o', color='r',linestyle = '',markersize = 1.5)
# #plt.plot(ind,d3,linestyle = '-')
# plt.subplot(4,2,8)
# plt.ylim(-0.04,0.04)
# plt.plot([0,1000],[0,0],linestyle ='--',color='k')
# plt.xlim(nxip+nxim+nggl,ndata)
# #plt.title(r'$w$')
# plt.xlabel(r'bin number', fontsize = 18)
# plt.ylabel(r'(d2-d1)/d2', fontsize = fs)
# plt.errorbar(ind,d1*0,s/d1,marker='o', color='k',linestyle = '',markersize = 0.0,alpha = 0.1)
# plt.plot(ind[ind0],(d2[ind0]-d1[ind0])/d2[ind0],marker='x', color='k',linestyle = '',markersize = 1.0)
# plt.plot(ind[ind1],(d2[ind1]-d1[ind1])/d2[ind1],marker='o', color='r',linestyle = '',markersize = 1.0)
# plt.tight_layout()
# plt.savefig(plotfile,dpi=400)
| 46.666667
| 137
| 0.648377
| 1,730
| 9,240
| 3.452601
| 0.079191
| 0.078687
| 0.108488
| 0.06362
| 0.86824
| 0.862381
| 0.854177
| 0.826385
| 0.819019
| 0.819019
| 0
| 0.066589
| 0.07684
| 9,240
| 197
| 138
| 46.903553
| 0.633646
| 0.389069
| 0
| 0.546512
| 0
| 0
| 0.056006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.046512
| null | null | 0.034884
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a4e372950f259ccc332309603895d7ffcef44bef
| 79
|
py
|
Python
|
src/flowket/__init__.py
|
vigsterkr/FlowKet
|
0d8f301b5f51a1bab83021f10f65cfb5f2751079
|
[
"MIT"
] | 21
|
2019-11-19T13:59:13.000Z
|
2021-12-03T10:26:30.000Z
|
src/flowket/__init__.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 10
|
2019-11-15T12:07:28.000Z
|
2020-11-07T18:12:18.000Z
|
src/flowket/__init__.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 11
|
2019-12-09T22:51:17.000Z
|
2021-11-29T22:05:41.000Z
|
from .utils.v1_to_v2 import fix_tensorflow_v1_names
fix_tensorflow_v1_names()
| 19.75
| 51
| 0.873418
| 14
| 79
| 4.357143
| 0.642857
| 0.42623
| 0.491803
| 0.655738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054795
| 0.075949
| 79
| 3
| 52
| 26.333333
| 0.780822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
a4ebf121a50aa9c2b9c1d31ee325aac4a61b4b6b
| 5,952
|
py
|
Python
|
tests/conftest.py
|
tlawrence3/bplogofuntest
|
26b90eb9ec604f73e2f5df3548646906bf9f6a6d
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
tlawrence3/bplogofuntest
|
26b90eb9ec604f73e2f5df3548646906bf9f6a6d
|
[
"MIT"
] | 7
|
2019-01-18T03:41:16.000Z
|
2019-06-29T01:56:32.000Z
|
tests/conftest.py
|
tlawrence3/tsfm
|
26b90eb9ec604f73e2f5df3548646906bf9f6a6d
|
[
"MIT"
] | 2
|
2017-10-05T18:11:06.000Z
|
2019-01-11T15:13:28.000Z
|
import pytest
import os
@pytest.fixture(scope="module")
def cove_files(tmpdir_factory):
struct_string_cove = "#=CS >>>>>>>..>>>>...........<<<<.>>>>>.......<<<<<.....>>>>>....\n#=CS ...<<<<<<<<<<<<.\n"
struct_string_text ="""A:0,72,1,71,2,70,3,69,4,68,5,67,6,66
D:9,25,10,24,11,23,12,22
C:27,43,28,42,29,41,30,40,31,39
T:49,65,50,64,51,63,52,62,53,61
"""
H_class = """CLUSTAL W (1.81) multiple sequence alignment
HGTG_gi|1002161287|ref|NC_029347.1||109108|109035|0|0|tSE|-||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|1002161287|ref|NC_029347.1||61258|61331|0|0|tSE|+||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|187763084|ref|NC_010654.1||118994|118921|0|0|tSE|-||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGGATCCACCACgCGCGGGTTCA
HGTG_gi|187763084|ref|NC_010654.1||69289|69362|0|0|tSE|+||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGGATCCACCACgCGCGGGTTCA
HGTG_gi|222084134|ref|NC_011942.1||56864|56791|0|0|tSE|-||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|222084134|ref|NC_011942.1||9716|9789|0|0|tSE|+||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|222139869|ref|NC_011954.1||109042|108969|0|0|tSE|-||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|222139869|ref|NC_011954.1||61119|61192|0|0|tSE|+||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|512721557|ref|NC_021438.1||114464|114391|0|0|tSE|-||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|512721557|ref|NC_021438.1||67253|67326|0|0|tSE|+||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|752789973|ref|NC_026301.1||114178|114105|0|0|tSE|-||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|752789973|ref|NC_026301.1||67436|67509|0|0|tSE|+||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|966203074|ref|NC_028734.1||56111|56038|0|0|tSE|-||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
HGTG_gi|966203074|ref|NC_028734.1||9449|9522|0|0|tSE|+||GNET| GCGGACGTAGCCAAGT--GGCtcAAGGCAGTGGATTGTGAATCCACCACgCGCGGGTTCA
**************** ********************* ********************
HGTG_gi|1002161287|ref|NC_029347.1||109108|109035|0|0|tSE|-||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|1002161287|ref|NC_029347.1||61258|61331|0|0|tSE|+||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|187763084|ref|NC_010654.1||118994|118921|0|0|tSE|-||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|187763084|ref|NC_010654.1||69289|69362|0|0|tSE|+||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|222084134|ref|NC_011942.1||56864|56791|0|0|tSE|-||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|222084134|ref|NC_011942.1||9716|9789|0|0|tSE|+||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|222139869|ref|NC_011954.1||109042|108969|0|0|tSE|-||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|222139869|ref|NC_011954.1||61119|61192|0|0|tSE|+||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|512721557|ref|NC_021438.1||114464|114391|0|0|tSE|-||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|512721557|ref|NC_021438.1||67253|67326|0|0|tSE|+||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|752789973|ref|NC_026301.1||114178|114105|0|0|tSE|-||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|752789973|ref|NC_026301.1||67436|67509|0|0|tSE|+||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|966203074|ref|NC_028734.1||56111|56038|0|0|tSE|-||GNET| ATCCCCGTCGTTCGCC
HGTG_gi|966203074|ref|NC_028734.1||9449|9522|0|0|tSE|+||GNET| ATCCCCGTCGTTCGCC
****************
"""
K_class = """CLUSTAL W (1.81) multiple sequence alignment
Kttt_gi|1002161287|ref|NC_029347.1||3573|1209|38|2330|ARA|-||GNET| GGGTTGCTAACTCAAT--GGT--AGAGTACTCGGCTTTTAACCGACTAGtTCCGGGTTCG
Kttt_gi|187763084|ref|NC_010654.1||3659|1145|38|2480|ARA|-||GNET| GGGTTGCTAACTCAAT--GGT--AGAGTACTCGGCTTTTAACCGAAGAGtTCCGGGTTCG
Kttt_gi|222084134|ref|NC_011942.1||5508|7908|38|2366|ARA|+||GNET| GGGTTGCTAACTCAAT--GGT--AGAGTACTCGGCTTTTAACCGAAGAGtTCCGGGTTCG
Kttt_gi|222139869|ref|NC_011954.1||3577|1209|38|2334|ARA|-||GNET| GGGTTGCTAACTCAAT--GGT--AGAGTACTCGGCTTTTAACCGACTAGtTCCGGGTTCG
Kttt_gi|512721557|ref|NC_021438.1||3654|1253|38|2367|ARA|-||GNET| GGGTTGCTAACTCAAT--GGT--AGAGTACTCGGCTTTTAACCGAAGAGtTCCGGGTTCG
Kttt_gi|752789973|ref|NC_026301.1||3353|968|38|2351|ARA|-||GNET| GGGTTGCTAACTCAAT--GGT--AGAGTACTCGGCTTTTAACCGAAGAGtTCCGGGTTCG
Kttt_gi|966203074|ref|NC_028734.1||5248|7641|38|2359|ARA|+||GNET| GGGTTGCTAACTCAAT--GGT--AGAGTACTCGGCTTTTAACCGAAGAGtTCCGGGTTCG
**************** *** ********************** *************
Kttt_gi|1002161287|ref|NC_029347.1||3573|1209|38|2330|ARA|-||GNET| AATCCCGGGCAACCCA
Kttt_gi|187763084|ref|NC_010654.1||3659|1145|38|2480|ARA|-||GNET| AATCCCGGGCAACCCA
Kttt_gi|222084134|ref|NC_011942.1||5508|7908|38|2366|ARA|+||GNET| AATCCCGGGCAACCCA
Kttt_gi|222139869|ref|NC_011954.1||3577|1209|38|2334|ARA|-||GNET| AATCCCGGGCAACCCA
Kttt_gi|512721557|ref|NC_021438.1||3654|1253|38|2367|ARA|-||GNET| AATCCCGGGCAACCCA
Kttt_gi|752789973|ref|NC_026301.1||3353|968|38|2351|ARA|-||GNET| AATCCCGGGCAACCCA
Kttt_gi|966203074|ref|NC_028734.1||5248|7641|38|2359|ARA|+||GNET| AATCCCGGGCAACCCA
****************
"""
cove = tmpdir_factory.mktemp("data").join("struct_cove.txt")
cove.write(struct_string_cove)
cove_file = open(str(cove), "r")
text = tmpdir_factory.mktemp("data").join("struct_text.txt")
text.write(struct_string_text)
text_file = open(str(text), "r")
H_file = tmpdir_factory.mktemp("data").join("GNET_H.aln")
H_file.write(H_class)
K_file = open(str(H_file)[:-10] + "GNET_K.aln", "w")
K_file.write(K_class)
K_file.close()
return ({'cove': cove_file, 'prefix': str(H_file)[:-6], 'tmp': str(H_file)[:-10], 'text': text_file})
| 70.857143
| 127
| 0.714046
| 794
| 5,952
| 5.20529
| 0.207809
| 0.050811
| 0.033874
| 0.060973
| 0.889185
| 0.855553
| 0.839584
| 0.740866
| 0.664892
| 0.664892
| 0
| 0.24398
| 0.099966
| 5,952
| 83
| 128
| 71.710843
| 0.527534
| 0
| 0
| 0.069444
| 0
| 0.625
| 0.887078
| 0.702571
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013889
| false
| 0
| 0.027778
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1073bf49df364efbbd0fd7192bea3ad4b6021161
| 1,010
|
py
|
Python
|
test/test_wallet.py
|
peerchemist/ZTipBot
|
17dd9b9115aa0334b580812ff36c02540848ecef
|
[
"MIT"
] | 3
|
2020-03-24T17:09:38.000Z
|
2020-03-24T17:32:04.000Z
|
test/test_wallet.py
|
peerchemist/ZTipBot
|
17dd9b9115aa0334b580812ff36c02540848ecef
|
[
"MIT"
] | 4
|
2020-03-12T15:11:06.000Z
|
2020-04-07T14:59:15.000Z
|
test/test_wallet.py
|
peerchemist/ZTipBot
|
17dd9b9115aa0334b580812ff36c02540848ecef
|
[
"MIT"
] | 1
|
2020-03-24T17:09:50.000Z
|
2020-03-24T17:09:50.000Z
|
from unittest.mock import patch
from src.wallet import check_balance
MOCK_USER_ID = 12345
@patch('src.wallet.get_balance')
def test_negative_balance(mock_get_balance):
mock_get_balance.return_value = -1.0
assert not check_balance(MOCK_USER_ID, -1.1)
assert not check_balance(MOCK_USER_ID, -1.0)
assert not check_balance(MOCK_USER_ID, 0.0)
assert not check_balance(MOCK_USER_ID, 1.0)
@patch('src.wallet.get_balance')
def test_zero_balance(mock_get_balance):
mock_get_balance.return_value = 0.0
assert check_balance(MOCK_USER_ID, -1.0)
assert check_balance(MOCK_USER_ID, 0.0)
assert not check_balance(MOCK_USER_ID, 0.1)
assert not check_balance(MOCK_USER_ID, 1.0)
@patch('src.wallet.get_balance')
def test_positive_balance(mock_get_balance):
    """With a positive balance, amounts up to the balance are accepted."""
    mock_get_balance.return_value = 1.0
    # Everything up to and including the balance (1.0) passes.
    for amount in (-1.0, 0.0, 1.0):
        assert check_balance(MOCK_USER_ID, amount)
    # The first amount strictly above the balance fails.
    assert not check_balance(MOCK_USER_ID, 1.1)
| 29.705882
| 48
| 0.766337
| 175
| 1,010
| 4.062857
| 0.137143
| 0.293952
| 0.292546
| 0.365682
| 0.900141
| 0.869198
| 0.869198
| 0.825598
| 0.825598
| 0.759494
| 0
| 0.040276
| 0.139604
| 1,010
| 33
| 49
| 30.606061
| 0.777906
| 0
| 0
| 0.375
| 0
| 0
| 0.065347
| 0.065347
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
108300112e63c09cde3890f0f4671341491fe8e2
| 93,443
|
py
|
Python
|
pycode/test.py
|
niumeng07/serving
|
9a42286c2e8e7e99f2b85a58f8811329229c6479
|
[
"Apache-2.0"
] | 1
|
2019-10-28T07:37:07.000Z
|
2019-10-28T07:37:07.000Z
|
pycode/test.py
|
niumeng07/serving
|
9a42286c2e8e7e99f2b85a58f8811329229c6479
|
[
"Apache-2.0"
] | null | null | null |
pycode/test.py
|
niumeng07/serving
|
9a42286c2e8e7e99f2b85a58f8811329229c6479
|
[
"Apache-2.0"
] | null | null | null |
math_ops = b"\n,\n\003Abs\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\no\n\rAccumulateNV2\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\200\001\001\220\001\001\n/\n\004Acos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Acosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\003Add\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\005\003\t\010\022\007\nW\n\004AddN\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\"!\n\001T\022\004type:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\025\200\001\001\220\001\001\nA\n\005AddV2\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\032\n\001T\022\004type:\017\n\r2\013\016\023\001\002\004\006\005\003\t\010\022\200\001\001\220\001\001\nh\n\003All\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nT\n\005Angle\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\nh\n\003Any\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\ni\n\020ApproximateEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\ttolerance\022\005float\032\005%\254\305\'7\220\001\001\n\233\001\n\006ArgMax\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n\233\001\n\006ArgMin\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n/\n\004Asin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Asinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Atan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n4\n\005Atan2\022\006\n\001y\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n.\n\005Atanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nh\n\013BatchMatMul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\"\021\n\005adj_x\022\004bool\032\002(\000\"\021\n\005adj_y\022\004bool\032\002(\000\n<\n\007Betainc\022\006\n\001a\"\001T\022\006\n\001b\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nK\n\010Bincount\022\007\n\003arr\030\003\022\010\n\004size\030\003\022\014\n\007weights\"\001T\032\t\n\004bins\"\00
1T\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\nS\n\tBucketize\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\nboundaries\022\013list(float)\n8\n\004Cast\022\t\n\001x\"\004SrcT\032\t\n\001y\"\004DstT\"\014\n\004SrcT\022\004type\"\014\n\004DstT\022\004type\n+\n\004Ceil\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\nn\n\013ClipByValue\022\006\n\001t\"\001T\022\023\n\016clip_value_min\"\001T\022\023\n\016clip_value_max\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\nT\n\021CompareAndBitpack\022\n\n\005input\"\001T\022\016\n\tthreshold\"\001T\032\n\n\006output\030\004\"\027\n\001T\022\004type:\014\n\n2\010\n\023\001\002\006\005\003\t\n]\n\007Complex\022\t\n\004real\"\001T\022\t\n\004imag\"\001T\032\013\n\003out\"\004Tout\"\025\n\001T\022\004type\032\0020\001:\006\n\0042\002\001\002\"\030\n\004Tout\022\004type\032\0020\010:\006\n\0042\002\010\022\nP\n\nComplexAbs\022\006\n\001x\"\001T\032\t\n\001y\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n7\n\004Conj\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type\032\0020\010:\007\n\0052\003\010\022\025\n,\n\003Cos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Cosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\005Cross\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\221\001\n\007Cumprod\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\006Cumsum\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\007Digamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\003Div\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\nB\n\005Equal\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n*\n\003Erf\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\004Erfc\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n,\n\003Exp\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Expm1\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n,\n\005Floor\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n?\n\010FloorDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n9\n\010FloorMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n=\n\007Greater\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\01
4\001\002\003\004\005\006\t\016\021\023\026\027\nB\n\014GreaterEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n}\n\023HistogramFixedWidth\022\013\n\006values\"\001T\022\020\n\013value_range\"\001T\022\t\n\005nbins\030\003\032\014\n\003out\"\005dtype\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\005dtype\022\004type\032\0020\003:\006\n\0042\002\003\t\n3\n\006Igamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n4\n\007Igammac\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nS\n\004Imag\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n.\n\003Inv\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n9\n\007InvGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\010IsFinite\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsInf\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsNan\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\004Less\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n?\n\tLessEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n-\n\006Lgamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\
ni\n\010LinSpace\022\n\n\005start\"\001T\022\t\n\004stop\"\001T\022\013\n\003num\"\004Tidx\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\016\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n,\n\003Log\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Log1p\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n$\n\nLogicalAnd\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\n\032\n\nLogicalNot\022\005\n\001x\030\n\032\005\n\001y\030\n\n#\n\tLogicalOr\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\np\n\006MatMul\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\n\214\001\n\003Max\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Maximum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n\215\001\n\004Mean\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\003Min\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Minimum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n5\n\003Mod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\003\t\023\023\016\001\002\n=\n\003Mul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\220\001\001\n.\n\003Neg\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nE\n\010NotEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n6\n\tPolygamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n6\n\003Pow\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\001\023\002\003\t\010\022\n\215\001\n\004Prod\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\267\001\n\032QuantizeDownAndShrinkRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedAdd\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\220\001\001\n\235\002\n\017QuantizedMatMul\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\"\n\013Tactivation\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedMul\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020
\r:\t\n\0072\005\013\014\r\017\020\220\001\001\na\n\005Range\022\r\n\005start\"\004Tidx\022\r\n\005limit\"\004Tidx\022\r\n\005delta\"\004Tidx\032\016\n\006output\"\004Tidx\"\033\n\004Tidx\022\004type\032\0020\003:\t\n\0072\005\016\001\002\003\t\nS\n\004Real\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n>\n\007RealDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n5\n\nReciprocal\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n@\n\016ReciprocalGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\177\n\023RequantizationRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\n\333\001\n\nRequantize\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\022\030\n\024requested_output_min\030\001\022\030\n\024requested_output_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n+\n\004Rint\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\005Round\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Rsqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n;\n\tRsqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\
022\004type:\n\n\0102\006\016\023\001\002\010\022\nt\n\nSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nu\n\013SegmentMean\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nt\n\nSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\ny\n\nSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n?\n\006Select\022\r\n\tcondition\030\n\022\006\n\001t\"\001T\022\006\n\001e\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n0\n\007Sigmoid\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n=\n\013SigmoidGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Sign\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n,\n\003Sin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Sinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\301\001\n\014SparseMatMul\022\007\n\001a\"\002Ta\022\007\n\001b\"\002Tb\032\013\n\007product\030\001\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\027\n\013a_is_sparse\022\004bool\032\002(\000\"\027\n\013b_is_sparse\022\004bool\032\002(\000\"\026\n\002Ta\022\004type\032\0020\001:\006\n\0042\002\001\016\"\026\n\002Tb\022\004type\032\0020\001:\006\n\0042\002\001\016\nz\n\021SparseSegmentMean\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\217\001\n\025SparseSegmentMeanGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\311\001\n 
SparseSegmentMeanWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n{\n\022SparseSegmentSqrtN\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\026SparseSegmentSqrtNGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\312\001\n!SparseSegmentSqrtNWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\001\n\020SparseSegmentSum\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\322\001\n\037SparseSegmentSumWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" 
\n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n-\n\004Sqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010SqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n1\n\006Square\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nG\n\021SquaredDifference\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\220\001\001\n:\n\003Sub\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n\214\001\n\003Sum\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\003Tan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n-\n\004Tanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010TanhGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\013TruncateDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n<\n\013TruncateMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n\274\001\n\022UnsortedSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\
033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\022UnsortedSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\275\001\n\023UnsortedSegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\301\001\n\022UnsortedSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n1\n\004Zeta\022\006\n\001x\"\001T\022\006\n\001q\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002"
array_ops = b"\nm\n\023BatchMatrixBandPart\022\n\n\005input\"\001T\022\r\n\tnum_lower\030\t\022\r\n\tnum_upper\030\t\032\t\n\004band\"\001T\"\t\n\001T\022\004typeB\026\010\016\022\022Use MatrixBandPart\nL\n\017BatchMatrixDiag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB\022\010\016\022\016Use MatrixDiag\nS\n\023BatchMatrixDiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\t\n\001T\022\004typeB\026\010\016\022\022Use MatrixDiagPart\n^\n\022BatchMatrixSetDiag\022\n\n\005input\"\001T\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB\025\010\016\022\021Use MatrixSetDiag\nr\n\014BatchToSpace\022\n\n\005input\"\001T\022\r\n\005crops\"\004Tidx\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\240\001\n\016BatchToSpaceND\022\n\n\005input\"\001T\022\033\n\013block_shape\"\014Tblock_shape\022\017\n\005crops\"\006Tcrops\032\013\n\006output\"\001T\"\t\n\001T\022\004type\" \n\014Tblock_shape\022\004type\032\0020\003:\006\n\0042\002\003\t\"\032\n\006Tcrops\022\004type\032\0020\003:\006\n\0042\002\003\t\nl\n\007Bitcast\022\n\n\005input\"\001T\032\016\n\006output\"\004type\" 
\n\001T\022\004type:\025\n\0232\021\016\023\001\002\t\003\004\021\006\005\010\022\013\014\017\020\r\"#\n\004type\022\004type:\025\n\0232\021\016\023\001\002\t\003\004\021\006\005\010\022\013\014\017\020\r\nA\n\rBroadcastArgs\022\007\n\002s0\"\001T\022\007\n\002s1\"\001T\032\007\n\002r0\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nR\n\025BroadcastGradientArgs\022\007\n\002s0\"\001T\022\007\n\002s1\"\001T\032\007\n\002r0\"\001T\032\007\n\002r1\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nQ\n\rCheckNumerics\022\013\n\006tensor\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\"\021\n\007message\022\006string\nN\n\006Concat\022\016\n\nconcat_dim\030\003\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\nI\n\014ConcatOffset\022\016\n\nconcat_dim\030\003\022\014\n\005shape\030\003*\001N\032\r\n\006offset\030\003*\001N\"\014\n\001N\022\003int(\0010\002\nh\n\010ConcatV2\022\016\n\006values\"\001T*\001N\022\014\n\004axis\"\004Tidx\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nY\n\022ConjugateTranspose\022\006\n\001x\"\001T\022\r\n\004perm\"\005Tperm\032\006\n\001y\"\001T\"\t\n\001T\022\004type\"\031\n\005Tperm\022\004type\032\0020\003:\006\n\0042\002\003\t\n8\n\005Const\032\017\n\006output\"\005dtype\"\017\n\005value\022\006tensor\"\r\n\005dtype\022\004type\n>\n\025DebugGradientIdentity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\230\001\001\nG\n\030DebugGradientRefIdentity\022\r\n\005input\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\230\001\001\n(\n\010DeepCopy\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\210\001\001\n\205\001\n\014DepthToSpace\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_siz
e\022\003int(\0010\002\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n\235\001\n\nDequantize\022\n\n\005input\"\001T\022\r\n\tmin_range\030\001\022\r\n\tmax_range\030\001\032\n\n\006output\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"C\n\004mode\022\006string\032\016\022\014MIN_COMBINED:#\n!\022\014MIN_COMBINED\022\tMIN_FIRST\022\006SCALED\n;\n\004Diag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n>\n\010DiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n\271\001\n\014EditDistance\022\026\n\022hypothesis_indices\030\t\022\026\n\021hypothesis_values\"\001T\022\024\n\020hypothesis_shape\030\t\022\021\n\rtruth_indices\030\t\022\021\n\014truth_values\"\001T\022\017\n\013truth_shape\030\t\032\n\n\006output\030\001\"\025\n\tnormalize\022\004bool\032\002(\001\"\t\n\001T\022\004type\nG\n\005Empty\022\t\n\005shape\030\003\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\020\n\004init\022\004bool\032\002(\000\210\001\001\nW\n\nExpandDims\022\n\n\005input\"\001T\022\013\n\003dim\"\004Tdim\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\004Tdim\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\023ExtractImagePatches\022\013\n\006images\"\001T\032\014\n\007patches\"\001T\"\027\n\006ksizes\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\213\001\n\027FakeQuantWithMinMaxArgs\022\n\n\006inputs\030\001\032\013\n\007outputs\030\001\"\023\n\003min\022\005float\032\005%\000\000\300\300\"\023\n\003max\022\005float\032\005%\000\000\300@\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\
004bool\032\002(\000\n\244\001\n\037FakeQuantWithMinMaxArgsGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\032\r\n\tbackprops\030\001\"\023\n\003min\022\005float\032\005%\000\000\300\300\"\023\n\003max\022\005float\032\005%\000\000\300@\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\ns\n\027FakeQuantWithMinMaxVars\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\013\n\007outputs\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n\302\001\n\037FakeQuantWithMinMaxVarsGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\027\n\023backprops_wrt_input\030\001\032\024\n\020backprop_wrt_min\030\001\032\024\n\020backprop_wrt_max\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n}\n!FakeQuantWithMinMaxVarsPerChannel\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\013\n\007outputs\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n\314\001\n)FakeQuantWithMinMaxVarsPerChannelGradient\022\r\n\tgradients\030\001\022\n\n\006inputs\030\001\022\007\n\003min\030\001\022\007\n\003max\030\001\032\027\n\023backprops_wrt_input\030\001\032\024\n\020backprop_wrt_min\030\001\032\024\n\020backprop_wrt_max\030\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\030\n\014narrow_range\022\004bool\032\002(\000\n^\n\004Fill\022\022\n\004dims\"\nindex_type\022\n\n\005value\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\036\n\nindex_type\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\006Gather\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\032\021\n\006output\"\007Tparams\"\034\n\020validate_indices\022\004bool\032\002(\001\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\
np\n\010GatherNd\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\032\021\n\006output\"\007Tparams\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n\226\001\n\010GatherV2\022\021\n\006params\"\007Tparams\022\023\n\007indices\"\010Tindices\022\r\n\004axis\"\005Taxis\032\021\n\006output\"\007Tparams\"\017\n\007Tparams\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\"\025\n\005Taxis\022\004type:\006\n\0042\002\003\t\n7\n\016GuaranteeConst\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\210\001\001\n.\n\010Identity\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n9\n\tIdentityN\022\n\n\005input2\001T\032\013\n\006output2\001T\"\023\n\001T\022\nlist(type)(\0010\001\n^\n\016ImmutableConst\032\017\n\006tensor\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shape\"\034\n\022memory_region_name\022\006string\n6\n\nInplaceAdd\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n6\n\nInplaceSub\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n9\n\rInplaceUpdate\022\006\n\001x\"\001T\022\005\n\001i\030\003\022\006\n\001v\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type\n:\n\021InvertPermutation\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\n\\\n\010ListDiff\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\010\n\003out\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nx\n\016MatrixBandPart\022\n\n\005input\"\001T\022\023\n\tnum_lower\"\006Tindex\022\023\n\tnum_upper\"\006Tindex\032\t\n\004band\"\001T\"\t\n\001T\022\004type\"\032\n\006Tindex\022\004type\032\0020\t:\006\n\0042\002\003\t\n3\n\nMatrixDiag\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n6\n\016M
atrixDiagPart\022\n\n\005input\"\001T\032\r\n\010diagonal\"\001T\"\t\n\001T\022\004type\nB\n\rMatrixSetDiag\022\n\n\005input\"\001T\022\r\n\010diagonal\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\215\001\n\tMirrorPad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\n\221\001\n\rMirrorPadGrad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\n\214\001\n\006OneHot\022\r\n\007indices\"\002TI\022\t\n\005depth\030\003\022\r\n\010on_value\"\001T\022\016\n\toff_value\"\001T\032\013\n\006output\"\001T\"\030\n\004axis\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\t\n\001T\022\004type\"\027\n\002TI\022\004type\032\0020\t:\007\n\0052\003\004\003\t\n8\n\010OnesLike\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\016\023\001\002\006\004\005\021\003\t\010\022\n\nM\n\004Pack\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\017\n\004axis\022\003int\032\002\030\000\n_\n\003Pad\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\nw\n\005PadV2\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\022\024\n\017constant_values\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\nV\n\016ParallelConcat\022\016\n\006values\"\001T*\001N\032\013\n\006output\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\016\n\005shape\022\005shape\nC\n\013Placeholder\032\017\n\0
06output\"\005dtype\"\r\n\005dtype\022\004type\"\024\n\005shape\022\005shape\032\004:\002\030\001\nw\n\rPlaceholderV2\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shapeB6\010\027\0222Placeholder now behaves the same as PlaceholderV2.\nX\n\026PlaceholderWithDefault\022\016\n\005input\"\005dtype\032\017\n\006output\"\005dtype\"\r\n\005dtype\022\004type\"\016\n\005shape\022\005shape\nL\n\017PreventGradient\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\007message\022\006string\032\002\022\000\n\354\001\n\025QuantizeAndDequantize\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\027\n\013range_given\022\004bool\032\002(\000\"\031\n\tinput_min\022\005float\032\005%\000\000\000\000\"\031\n\tinput_max\022\005float\032\005%\000\000\000\000\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002B\'\010\026\022#Replaced by QuantizeAndDequantizeV2\n\257\001\n\027QuantizeAndDequantizeV2\022\n\n\005input\"\001T\022\016\n\tinput_min\"\001T\022\016\n\tinput_max\"\001T\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\023\n\010num_bits\022\003int\032\002\030\010\"\027\n\013range_given\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n\250\001\n\027QuantizeAndDequantizeV3\022\n\n\005input\"\001T\022\016\n\tinput_min\"\001T\022\016\n\tinput_max\"\001T\022\014\n\010num_bits\030\003\032\013\n\006output\"\001T\"\030\n\014signed_input\022\004bool\032\002(\001\"\027\n\013range_given\022\004bool\032\002(\001\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n\221\002\n\nQuantizeV2\022\t\n\005input\030\001\022\r\n\tmin_range\030\001\022\r\n\tmax_range\030\001\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"C\n\004mode\022\006string\032\016\022\014MIN_COMBINED:
#\n!\022\014MIN_COMBINED\022\tMIN_FIRST\022\006SCALED\"R\n\nround_mode\022\006string\032\025\022\023HALF_AWAY_FROM_ZERO:%\n#\022\023HALF_AWAY_FROM_ZERO\022\014HALF_TO_EVEN\n\236\001\n\017QuantizedConcat\022\016\n\nconcat_dim\030\003\022\016\n\006values\"\001T*\001N\022\021\n\ninput_mins\030\001*\001N\022\022\n\013input_maxes\030\001*\001N\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\014\n\001N\022\003int(\0010\002\"\t\n\001T\022\004type\n\205\002\n\025QuantizedInstanceNorm\022\006\n\001x\"\001T\022\t\n\005x_min\030\001\022\t\n\005x_max\030\001\032\006\n\001y\"\001T\032\t\n\005y_min\030\001\032\t\n\005y_max\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\022output_range_given\022\004bool\032\002(\000\"\033\n\013given_y_min\022\005float\032\005%\000\000\000\000\"\033\n\013given_y_max\022\005float\032\005%\000\000\000\000\" \n\020variance_epsilon\022\005float\032\005%\254\305\'7\"\036\n\016min_separation\022\005float\032\005%o\022\203:\n\242\001\n\020QuantizedReshape\022\013\n\006tensor\"\001T\022\017\n\005shape\"\006Tshape\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\013\n\006output\"\001T\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\t\n\001T\022\004type\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\n)\n\004Rank\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\t\n\001T\022\004type\n:\n\013RefIdentity\022\r\n\005input\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\230\001\001\n[\n\007Reshape\022\013\n\006tensor\"\001T\022\017\n\005shape\"\006Tshape\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\002\n\032ResourceStridedSliceAssign\022\007\n\003ref\030\024\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\n\n\005value\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025
\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\210\001\001\nK\n\007Reverse\022\013\n\006tensor\"\001T\022\010\n\004dims\030\n\032\013\n\006output\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\004\006\021\005\003\t\n\023\001\002\010\022\007\n\212\001\n\017ReverseSequence\022\n\n\005input\"\001T\022\023\n\013seq_lengths\"\004Tlen\032\013\n\006output\"\001T\"\016\n\007seq_dim\022\003int\"\024\n\tbatch_dim\022\003int\032\002\030\000\"\t\n\001T\022\004type\"\030\n\004Tlen\022\004type\032\0020\t:\006\n\0042\002\003\t\nl\n\tReverseV2\022\013\n\006tensor\"\001T\022\014\n\004axis\"\004Tidx\032\013\n\006output\"\001T\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\035\n\001T\022\004type:\022\n\0202\016\004\006\021\005\003\t\n\016\023\001\002\010\022\007\ns\n\tScatterNd\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\022\021\n\005shape\"\010Tindices\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n\221\001\n\027ScatterNdNonAliasingAdd\022\n\n\005input\"\001T\022\023\n\007indices\"\010Tindices\022\014\n\007updates\"\001T\032\013\n\006output\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nP\n\005Shape\022\n\n\005input\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\ne\n\006ShapeN\022\r\n\005input\"\001T*\001N\032\025\n\006output\"\010out_type*\001N\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\nO\n\004Size\022\n\n\005input\"\001T\032\022\n\006output\"\010out_type\"\t\n\001T\022\004type\"\034\n\010out_type\022\004type\032\0020\003:\006\n\0042\002\003\t\na\n\005Slice\022\n\n\005input\"\001T\022\016\n\005begin\"\005Index\022\r\n\004size\"\005Index\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\n.\n\010Snapshot\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\177\n\014SpaceToBatch\022\n\n\005input\"\001T\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\"\025\n\nblock_size\022\003int(\0010\002\n\251\001\n\016SpaceToBatchND\022\n\n\005input\"\001T\022\033\n\013block_shape\"\014Tblock_shape\022\025\n\010paddings\"\tTpaddings\032\013\n\006output\"\001T\"\t\n\001T\022\004type\" 
\n\014Tblock_shape\022\004type\032\0020\003:\006\n\0042\002\003\t\"\035\n\tTpaddings\022\004type\032\0020\003:\006\n\0042\002\003\t\n\205\001\n\014SpaceToDepth\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\nblock_size\022\003int(\0010\002\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n[\n\005Split\022\r\n\tsplit_dim\030\003\022\n\n\005value\"\001T\032\026\n\006output\"\001T*\tnum_split\"\024\n\tnum_split\022\003int(\0010\001\"\t\n\001T\022\004type\n\213\001\n\006SplitV\022\n\n\005value\"\001T\022\023\n\013size_splits\"\004Tlen\022\r\n\tsplit_dim\030\003\032\026\n\006output\"\001T*\tnum_split\"\024\n\tnum_split\022\003int(\0010\001\"\t\n\001T\022\004type\"\030\n\004Tlen\022\004type\032\0020\t:\006\n\0042\002\003\t\nN\n\007Squeeze\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\037\n\014squeeze_dims\022\tlist(int)\032\002\n\000(\001\n2\n\014StopGradient\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\366\001\n\014StridedSlice\022\n\n\005input\"\001T\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\n\220\002\n\022StridedSliceAssign\022\013\n\003ref\"\001T\200\001\001\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\n\n\005value\"\001T\032\022\n\noutput_ref\"\001T\200\001\001\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"
\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\n\207\002\n\020StridedSliceGrad\022\016\n\005shape\"\005Index\022\016\n\005begin\"\005Index\022\014\n\003end\"\005Index\022\020\n\007strides\"\005Index\022\007\n\002dy\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\025\n\005Index\022\004type:\006\n\0042\002\003\t\"\025\n\nbegin_mask\022\003int\032\002\030\000\"\023\n\010end_mask\022\003int\032\002\030\000\"\030\n\rellipsis_mask\022\003int\032\002\030\000\"\030\n\rnew_axis_mask\022\003int\032\002\030\000\"\033\n\020shrink_axis_mask\022\003int\032\002\030\000\nc\n\004Tile\022\n\n\005input\"\001T\022\027\n\tmultiples\"\nTmultiples\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\036\n\nTmultiples\022\004type\032\0020\003:\006\n\0042\002\003\t\nm\n\010TileGrad\022\n\n\005input\"\001T\022\r\n\tmultiples\030\003\032\013\n\006output\"\001T\"\t\n\001T\022\004typeB.\010\003\022*TileGrad has been replaced with reduce_sum\nP\n\tTranspose\022\006\n\001x\"\001T\022\r\n\004perm\"\005Tperm\032\006\n\001y\"\001T\"\t\n\001T\022\004type\"\031\n\005Tperm\022\004type\032\0020\003:\006\n\0042\002\003\t\nP\n\006Unique\022\006\n\001x\"\001T\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\n|\n\010UniqueV2\022\006\n\001x\"\001T\022\r\n\004axis\"\005Taxis\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\"\t\n\001T\022\004type\"\031\n\005Taxis\022\004type\032\0020\t:\006\n\0042\002\003\t\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nl\n\020UniqueWithCounts\022\006\n\001x\"\001T\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\032\020\n\005count\"\007out_idx\"\t\n\001T\022\004type\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\230\001\n\022UniqueWithCountsV2\022\006\n\001x\"\001T\022\r\n\004axis\"\005Taxis\032\006\n\001y\"\001T\032\016\n\003idx\"\007out_idx\032\020\n\005count\"\007out_idx
\"\t\n\001T\022\004type\"\031\n\005Taxis\022\004type\032\0020\t:\006\n\0042\002\003\t\"\033\n\007out_idx\022\004type\032\0020\003:\006\n\0042\002\003\t\nP\n\006Unpack\022\n\n\005value\"\001T\032\020\n\006output\"\001T*\003num\"\014\n\003num\022\003int(\001\"\t\n\001T\022\004type\"\017\n\004axis\022\003int\032\002\030\000\nW\n\014UnravelIndex\022\017\n\007indices\"\004Tidx\022\014\n\004dims\"\004Tidx\032\016\n\006output\"\004Tidx\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nE\n\005Where\022\n\n\005input\"\001T\032\t\n\005index\030\t\"%\n\001T\022\004type\032\0020\n:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\n&\n\tZerosLike\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\t\n\001T\022\004type"
ctrl_ops = b"\n@\n\005Abort\"\027\n\terror_msg\022\006string\032\002\022\000\"\036\n\022exit_without_error\022\004bool\032\002(\000\n\020\n\016ControlTrigger\ny\n\005Enter\022\t\n\004data\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\024\n\nframe_name\022\006string\"\027\n\013is_constant\022\004bool\032\002(\000\"\036\n\023parallel_iterations\022\003int\032\002\030\n\n)\n\004Exit\022\t\n\004data\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n!\n\010LoopCond\022\t\n\005input\030\n\032\n\n\006output\030\n\nN\n\005Merge\022\016\n\006inputs\"\001T*\001N\032\013\n\006output\"\001T\032\017\n\013value_index\030\003\"\t\n\001T\022\004type\"\014\n\001N\022\003int(\0010\001\n2\n\rNextIteration\022\t\n\004data\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n\006\n\004NoOp\n\202\001\n\010RefEnter\022\014\n\004data\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\"\024\n\nframe_name\022\006string\"\027\n\013is_constant\022\004bool\032\002(\000\"\036\n\023parallel_iterations\022\003int\032\002\030\n\n2\n\007RefExit\022\014\n\004data\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\nW\n\010RefMerge\022\021\n\006inputs\"\001T*\001N\200\001\001\032\016\n\006output\"\001T\200\001\001\032\017\n\013value_index\030\003\"\t\n\001T\022\004type\"\014\n\001N\022\003int(\0010\001\n;\n\020RefNextIteration\022\014\n\004data\"\001T\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\nR\n\tRefSelect\022\t\n\005index\030\003\022\021\n\006inputs\"\001T*\001N\200\001\001\032\016\n\006output\"\001T\200\001\001\"\t\n\001T\022\004type\"\014\n\001N\022\003int(\0010\001\n\\\n\tRefSwitch\022\014\n\004data\"\001T\200\001\001\022\010\n\004pred\030\n\032\024\n\014output_false\"\001T\200\001\001\032\023\n\013output_true\"\001T\200\001\001\"\t\n\001T\022\004type\230\001\001\nM\n\006Switch\022\t\n\004data\"\001T\022\010\n\004pred\030\n\032\021\n\014output_false\"\001T\032\020\n\013output_true
\"\001T\"\t\n\001T\022\004type"
linalg_ops = b"\nV\n\rBatchCholesky\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\002\001B\031\010\r\022\025Use Cholesky instead.\ne\n\021BatchCholeskyGrad\022\006\n\001l\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002B\035\010\r\022\031Use CholeskyGrad instead.\nj\n\026BatchMatrixDeterminant\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\001\002\010\022B\"\010\r\022\036Use MatrixDeterminant instead.\nu\n\022BatchMatrixInverse\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\023\n\007adjoint\022\004bool\032\002(\000\"\021\n\001T\022\004type:\006\n\0042\002\002\001B\036\010\r\022\032Use MatrixInverse instead.\n|\n\020BatchMatrixSolve\022\013\n\006matrix\"\001T\022\010\n\003rhs\"\001T\032\013\n\006output\"\001T\"\023\n\007adjoint\022\004bool\032\002(\000\"\021\n\001T\022\004type:\006\n\0042\002\002\001B\034\010\r\022\030Use MatrixSolve instead.\n\221\001\n\022BatchMatrixSolveLs\022\013\n\006matrix\"\001T\022\010\n\003rhs\"\001T\022\022\n\016l2_regularizer\030\002\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\002\001\"\020\n\004fast\022\004bool\032\002(\001B\036\010\r\022\032Use MatrixSolveLs instead.\n\243\001\n\032BatchMatrixTriangularSolve\022\013\n\006matrix\"\001T\022\010\n\003rhs\"\001T\032\013\n\006output\"\001T\"\021\n\005lower\022\004bool\032\002(\001\"\023\n\007adjoint\022\004bool\032\002(\000\"\021\n\001T\022\004type:\006\n\0042\002\002\001B&\010\r\022\"Use MatrixTriangularSolve instead.\nd\n\023BatchSelfAdjointEig\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\002\001B!\010\013\022\035Use SelfAdjointEigV2 instead.\n\200\001\n\025BatchSelfAdjointEigV2\022\n\n\005input\"\001T\032\006\n\001e\"\001T\032\006\n\001v\"\001T\"\025\n\tcompute_v\022\004bool\032\002(\001\"\021\n\001T\022\004type:\006\n\0042\002\002\001B!\010\r\022\035Use 
SelfAdjointEigV2 instead.\n\214\001\n\010BatchSvd\022\n\n\005input\"\001T\032\006\n\001s\"\001T\032\006\n\001u\"\001T\032\006\n\001v\"\001T\"\026\n\ncompute_uv\022\004bool\032\002(\001\"\031\n\rfull_matrices\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022B\024\010\r\022\020Use Svd instead.\n8\n\010Cholesky\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022\nA\n\014CholeskyGrad\022\006\n\001l\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n\\\n\024LogMatrixDeterminant\022\n\n\005input\"\001T\032\t\n\004sign\"\001T\032\030\n\023log_abs_determinant\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\001\002\010\022\nA\n\021MatrixDeterminant\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\001\002\010\022\nA\n\021MatrixExponential\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022\nR\n\rMatrixInverse\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\023\n\007adjoint\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022\n=\n\017MatrixLogarithm\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\010\022\n[\n\013MatrixSolve\022\013\n\006matrix\"\001T\022\010\n\003rhs\"\001T\032\013\n\006output\"\001T\"\023\n\007adjoint\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022\nn\n\rMatrixSolveLs\022\013\n\006matrix\"\001T\022\010\n\003rhs\"\001T\022\022\n\016l2_regularizer\030\002\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022\"\020\n\004fast\022\004bool\032\002(\001\nx\n\025MatrixTriangularSolve\022\013\n\006matrix\"\001T\022\010\n\003rhs\"\001T\032\013\n\006output\"\001T\"\021\n\005lower\022\004bool\032\002(\001\"\023\n\007adjoint\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\002\
001\010\022\nP\n\002Qr\022\n\n\005input\"\001T\032\006\n\001q\"\001T\032\006\n\001r\"\001T\"\031\n\rfull_matrices\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022\n_\n\016SelfAdjointEig\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\002\001B!\010\013\022\035Use SelfAdjointEigV2 instead.\nZ\n\020SelfAdjointEigV2\022\n\n\005input\"\001T\032\006\n\001e\"\001T\032\006\n\001v\"\001T\"\025\n\tcompute_v\022\004bool\032\002(\001\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022\nq\n\003Svd\022\n\n\005input\"\001T\032\006\n\001s\"\001T\032\006\n\001u\"\001T\032\006\n\001v\"\001T\"\026\n\ncompute_uv\022\004bool\032\002(\001\"\031\n\rfull_matrices\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\002\001\010\022"
dataflow_ops = b"\nr\n\030AccumulatorApplyGradient\022\r\n\006handle\030\007\200\001\001\022\016\n\nlocal_step\030\t\022\021\n\010gradient\"\005dtype\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n?\n\031AccumulatorNumAccumulated\022\r\n\006handle\030\007\200\001\001\032\023\n\017num_accumulated\030\003\n>\n\030AccumulatorSetGlobalStep\022\r\n\006handle\030\007\200\001\001\022\023\n\017new_global_step\030\t\nr\n\027AccumulatorTakeGradient\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_required\030\003\032\020\n\007average\"\005dtype\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\255\001\n\007Barrier\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\nB\n\014BarrierClose\022\r\n\006handle\030\007\200\001\001\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\n0\n\025BarrierIncompleteSize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n\\\n\021BarrierInsertMany\022\r\n\006handle\030\007\200\001\001\022\010\n\004keys\030\007\022\013\n\006values\"\001T\"\t\n\001T\022\004type\"\026\n\017component_index\022\003int\n+\n\020BarrierReadySize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n\347\001\n\017BarrierTakeMany\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_elements\030\003\032\013\n\007indices\030\t\032\010\n\004keys\030\007\032\031\n\006values2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\035\n\021allow_small_batch\022\004bool\032\002(\000\"\037\n\023wait_for_incomplete\022\004bool\032\002(\000\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\224\
001\n\026ConditionalAccumulator\032\r\n\006handle\030\007\200\001\001\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n$\n\023DeleteSessionTensor\022\n\n\006handle\030\007\210\001\001\nq\n\020DynamicPartition\022\t\n\004data\"\001T\022\016\n\npartitions\030\003\032\034\n\007outputs\"\001T*\016num_partitions\"\031\n\016num_partitions\022\003int(\0010\001\"\t\n\001T\022\004type\nS\n\rDynamicStitch\022\016\n\007indices\030\003*\001N\022\014\n\004data\"\001T*\001N\032\013\n\006merged\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\n\257\001\n\tFIFOQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\256\001\n\013FIFOQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n+\n\tFakeQueue\022\014\n\010resource\030\024\032\r\n\006handle\030\007\200\001\001\210\001\001\n8\n\020GetSessionHandle\022\n\n\005value\"\001T\032\n\n\006handle\030\007\"\t\n\001T\022\004type\210\001\001\n:\n\022GetSessionHandleV2\022\n\n\005value\"\001T\032\n\n\006handle\030\024\"\t\n\001T\022\004type\210\001\001\n@\n\020GetSessionTensor\022\n\n\006handle\030\007\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\210\001\001\n\211\001\n\010MapClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memo
ry_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\234\001\n\021MapIncompleteSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\264\001\n\007MapPeek\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\222\001\n\007MapSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\325\001\n\010MapStage\022\007\n\003key\030\t\022\013\n\007indices\030\003\022\025\n\006values2\013fake_dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\035\n\013fake_dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\267\001\n\nMapUnstage\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\274\001\n\017MapUnstageNoKe
y\022\013\n\007indices\030\003\032\007\n\003key\030\t\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\220\001\n\017OrderedMapClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\243\001\n\030OrderedMapIncompleteSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\273\001\n\016OrderedMapPeek\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\231\001\n\016OrderedMapSize\032\010\n\004size\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\334\001\n\017OrderedMapStage\022\007\n\003key\030\t\022\013\n\007indices\030\003\022\025\n\006values2\013fake_dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\035\n\013fake_dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\00
2\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\276\001\n\021OrderedMapUnstage\022\007\n\003key\030\t\022\013\n\007indices\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\303\001\n\026OrderedMapUnstageNoKey\022\013\n\007indices\030\003\032\007\n\003key\030\t\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\266\001\n\020PaddingFIFOQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\265\001\n\022PaddingFIFOQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n[\n\025ParallelDynamicStitch\022\016\n\007indices\030\003*\001N\022\014\n\004data\"\001T*\001N\032\013\n\006merged\"\001T\"\014\n\001N\022\003int(\0010\001\"\t\n\001T\022\004type\n\261\001\n\rPriorityQueue\032\r\n\006handle\030\007\200\001\001\"#\n\017component_types\022\nlist(type)\032\002\n\000(\001\"\027\n\006shapes\022\013list(shape)(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377
\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\260\001\n\017PriorityQueueV2\032\n\n\006handle\030\024\"#\n\017component_types\022\nlist(type)\032\002\n\000(\001\"\027\n\006shapes\022\013list(shape)(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n@\n\nQueueClose\022\r\n\006handle\030\007\200\001\001\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\nB\n\014QueueCloseV2\022\n\n\006handle\030\024\"#\n\027cancel_pending_enqueues\022\004bool\032\002(\000\210\001\001\n\177\n\014QueueDequeue\022\r\n\006handle\030\007\200\001\001\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\212\001\n\020QueueDequeueMany\022\r\n\006handle\030\007\200\001\001\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\214\001\n\022QueueDequeueManyV2\022\n\n\006handle\030\024\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n\212\001\n\020QueueDequeueUpTo\022\r\n\006handle\030\007\200\001\001\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n\214\001\n\022QueueDequeueUpToV2\022\n\n\006handle\030\024\022\005\n\001n\030\003\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\3
77\001\210\001\001\n\201\001\n\016QueueDequeueV2\022\n\n\006handle\030\024\032\035\n\ncomponents2\017component_types\"!\n\017component_types\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\nw\n\014QueueEnqueue\022\r\n\006handle\030\007\200\001\001\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n{\n\020QueueEnqueueMany\022\r\n\006handle\030\007\200\001\001\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\n}\n\022QueueEnqueueManyV2\022\n\n\006handle\030\024\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\ny\n\016QueueEnqueueV2\022\n\n\006handle\030\024\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\"\036\n\ntimeout_ms\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001\n-\n\rQueueIsClosed\022\r\n\006handle\030\007\200\001\001\032\r\n\tis_closed\030\n\n/\n\017QueueIsClosedV2\022\n\n\006handle\030\024\032\r\n\tis_closed\030\n\210\001\001\n$\n\tQueueSize\022\r\n\006handle\030\007\200\001\001\032\010\n\004size\030\003\n&\n\013QueueSizeV2\022\n\n\006handle\030\024\032\010\n\004size\030\003\210\001\001\n\371\001\n\022RandomShuffleQueue\032\r\n\006handle\030\007\200\001\001\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\034\n\021min_after_dequeue\022\003int\032\002\030\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\
022\006string\032\002\022\000\210\001\001\n\370\001\n\024RandomShuffleQueueV2\032\n\n\006handle\030\024\"!\n\017component_types\022\nlist(type)(\0010\001\"\033\n\006shapes\022\013list(shape)\032\002\n\000(\001\"\034\n\010capacity\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\"\034\n\021min_after_dequeue\022\003int\032\002\030\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\357\001\n\013RecordInput\032\013\n\007records\030\007\"\026\n\014file_pattern\022\006string\"\034\n\020file_random_seed\022\003int\032\003\030\255\002\"(\n\030file_shuffle_shift_ratio\022\005float\032\005%\000\000\000\000\"\034\n\020file_buffer_size\022\003int\032\003\030\220N\"\033\n\020file_parallelism\022\003int\032\002\030\020\"\025\n\nbatch_size\022\003int\032\002\030 \"\036\n\020compression_type\022\006string\032\002\022\000\210\001\001\n\302\001\n\036SparseAccumulatorApplyGradient\022\r\n\006handle\030\007\200\001\001\022\016\n\nlocal_step\030\t\022\024\n\020gradient_indices\030\t\022\030\n\017gradient_values\"\005dtype\022\022\n\016gradient_shape\030\t\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\027\n\017has_known_shape\022\004bool\n\217\001\n\035SparseAccumulatorTakeGradient\022\r\n\006handle\030\007\200\001\001\022\020\n\014num_required\030\003\032\013\n\007indices\030\t\032\017\n\006values\"\005dtype\032\t\n\005shape\030\t\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\232\001\n\034SparseConditionalAccumulator\032\r\n\006handle\030\007\200\001\001\"$\n\005dtype\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\00
1\nF\n\005Stack\032\r\n\006handle\030\007\200\001\001\"\021\n\telem_type\022\004type\"\030\n\nstack_name\022\006string\032\002\022\000\210\001\001\n\033\n\nStackClose\022\r\n\006handle\030\007\200\001\001\n\035\n\014StackCloseV2\022\n\n\006handle\030\024\210\001\001\n?\n\010StackPop\022\r\n\006handle\030\007\200\001\001\032\021\n\004elem\"\telem_type\"\021\n\telem_type\022\004type\nA\n\nStackPopV2\022\n\n\006handle\030\024\032\021\n\004elem\"\telem_type\"\021\n\telem_type\022\004type\210\001\001\nV\n\tStackPush\022\r\n\006handle\030\007\200\001\001\022\t\n\004elem\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\027\n\013swap_memory\022\004bool\032\002(\000\nX\n\013StackPushV2\022\n\n\006handle\030\024\022\t\n\004elem\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\"\027\n\013swap_memory\022\004bool\032\002(\000\210\001\001\nS\n\007StackV2\022\014\n\010max_size\030\003\032\n\n\006handle\030\024\"\021\n\telem_type\022\004type\"\030\n\nstack_name\022\006string\032\002\022\000\210\001\001\n\234\001\n\005Stage\022\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\213\001\n\nStageClear\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\253\001\n\tStagePeek\022\t\n\005index\030\003\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\224\001\n\tStageSize\032\010\n\004s
ize\030\003\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\024\n\006dtypes\022\nlist(type)\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\306\001\n\013TensorArray\022\010\n\004size\030\003\032\r\n\006handle\030\007\200\001\001\"\r\n\005dtype\022\004type\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"\037\n\021tensor_array_name\022\006string\032\002\022\000\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\025\010\020\022\021Use TensorArrayV3\210\001\001\n=\n\020TensorArrayClose\022\r\n\006handle\030\007\200\001\001B\032\010\020\022\026Use TensorArrayCloseV3\n<\n\022TensorArrayCloseV2\022\n\n\006handle\030\007B\032\010\032\022\026Use TensorArrayCloseV3\n#\n\022TensorArrayCloseV3\022\n\n\006handle\030\024\210\001\001\n\234\001\n\021TensorArrayConcat\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001B\031\010\020\022\025Use TensorArrayGradV3\n\200\001\n\023TensorArrayConcatV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001\n\203\001\n\023TensorArrayConcatV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\032\013\n\007lengths\030\t\"\r\n\005dtype\022\004type\"$\n\025element_shape_except0\022\005shape\032\004:\002\030\001\210\001\001\n\226\001\n\021TensorArrayGather\022\r\n\006handle\030\007\200\001\001\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\033\010\020\022\027Use 
TensorArrayGatherV3\n\225\001\n\023TensorArrayGatherV2\022\n\n\006handle\030\007\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B\033\010\032\022\027Use TensorArrayGatherV3\n{\n\023TensorArrayGatherV3\022\n\n\006handle\030\024\022\013\n\007indices\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\210\001\001\nn\n\017TensorArrayGrad\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\022\n\013grad_handle\030\007\200\001\001\"\020\n\006source\022\006stringB\031\010\020\022\025Use TensorArrayGradV3\210\001\001\nm\n\021TensorArrayGradV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\017\n\013grad_handle\030\007\"\020\n\006source\022\006stringB\031\010\032\022\025Use TensorArrayGradV3\210\001\001\n`\n\021TensorArrayGradV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\017\n\013grad_handle\030\024\032\014\n\010flow_out\030\001\"\020\n\006source\022\006string\210\001\001\n\224\001\n\017TensorArrayPack\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001B(\010\020\022$Use TensorArrayGatherV3 with RangeOp\nr\n\017TensorArrayRead\022\r\n\006handle\030\007\200\001\001\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004typeB\031\010\020\022\025Use TensorArrayReadV3\nq\n\021TensorArrayReadV2\022\n\n\006handle\030\007\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004typeB\031\010\032\022\025Use 
TensorArrayReadV3\nY\n\021TensorArrayReadV3\022\n\n\006handle\030\024\022\t\n\005index\030\003\022\013\n\007flow_in\030\001\032\016\n\005value\"\005dtype\"\r\n\005dtype\022\004type\210\001\001\n}\n\022TensorArrayScatter\022\r\n\006handle\030\007\200\001\001\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\031\010\023\022\025Use TensorArrayGradV3\n\177\n\024TensorArrayScatterV2\022\n\n\006handle\030\007\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\034\010\032\022\030Use TensorArrayScatterV3\nd\n\024TensorArrayScatterV3\022\n\n\006handle\030\024\022\013\n\007indices\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\nR\n\017TensorArraySize\022\r\n\006handle\030\007\200\001\001\022\013\n\007flow_in\030\001\032\010\n\004size\030\003B\031\010\020\022\025Use TensorArraySizeV3\nQ\n\021TensorArraySizeV2\022\n\n\006handle\030\007\022\013\n\007flow_in\030\001\032\010\n\004size\030\003B\031\010\032\022\025Use TensorArraySizeV3\n9\n\021TensorArraySizeV3\022\n\n\006handle\030\024\022\013\n\007flow_in\030\001\032\010\n\004size\030\003\210\001\001\n|\n\020TensorArraySplit\022\r\n\006handle\030\007\200\001\001\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\020\022\026Use TensorArraySplitV3\n{\n\022TensorArraySplitV2\022\n\n\006handle\030\007\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\032\022\026Use 
TensorArraySplitV3\nb\n\022TensorArraySplitV3\022\n\n\006handle\030\024\022\n\n\005value\"\001T\022\013\n\007lengths\030\t\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\n\177\n\021TensorArrayUnpack\022\r\n\006handle\030\007\200\001\001\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB)\010\024\022%Use TensorArrayScatterV3 with RangeOp\n\305\001\n\rTensorArrayV2\022\010\n\004size\030\003\032\n\n\006handle\030\007\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"\037\n\021tensor_array_name\022\006string\032\002\022\000B\025\010\032\022\021Use TensorArrayV3\210\001\001\n\336\001\n\rTensorArrayV3\022\010\n\004size\030\003\032\n\n\006handle\030\024\032\010\n\004flow\030\001\"\r\n\005dtype\022\004type\"\034\n\relement_shape\022\005shape\032\004:\002\030\001\"\030\n\014dynamic_size\022\004bool\032\002(\000\"\034\n\020clear_after_read\022\004bool\032\002(\001\"$\n\030identical_element_shapes\022\004bool\032\002(\000\"\037\n\021tensor_array_name\022\006string\032\002\022\000\210\001\001\nz\n\020TensorArrayWrite\022\r\n\006handle\030\007\200\001\001\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\020\022\026Use TensorArrayWriteV3\ny\n\022TensorArrayWriteV2\022\n\n\006handle\030\007\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004typeB\032\010\032\022\026Use 
TensorArrayWriteV3\n`\n\022TensorArrayWriteV3\022\n\n\006handle\030\024\022\t\n\005index\030\003\022\n\n\005value\"\001T\022\013\n\007flow_in\030\001\032\014\n\010flow_out\030\001\"\t\n\001T\022\004type\210\001\001\n\236\001\n\007Unstage\032\020\n\006values2\006dtypes\"\025\n\010capacity\022\003int\032\002\030\000(\001\"\031\n\014memory_limit\022\003int\032\002\030\000(\001\"\030\n\006dtypes\022\nlist(type)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001"
dataset_ops = b"\n\177\n\014BatchDataset\022\021\n\rinput_dataset\030\025\022\016\n\nbatch_size\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\205\001\n\031BytesProducedStatsDataset\022\021\n\rinput_dataset\030\025\022\007\n\003tag\030\007\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n}\n\014CacheDataset\022\021\n\rinput_dataset\030\025\022\014\n\010filename\030\007\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\212\001\n\022ConcatenateDataset\022\021\n\rinput_dataset\030\025\022\023\n\017another_dataset\030\025\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\203\001\n\026DatasetToSingleElement\022\013\n\007dataset\030\025\032\032\n\ncomponents2\014output_types\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\233\001\n\031DenseToSparseBatchDataset\022\021\n\rinput_dataset\030\025\022\016\n\nbatch_size\030\t\022\r\n\trow_shape\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n=\n\023DeserializeIterator\022\023\n\017resource_handle\030\024\022\016\n\nserialized\030\025\210\001\001\n_\n\025EnqueueInQueueDataset\022\t\n\005queue\030\025\022\031\n\ncomponents2\013Tcomponents\"\035\n\013Tcomponents\022\nlist(type)(\0010\001\210\001\001\n\276\001\n\rFilterDataset\022\021\n\rinput_dataset\030\025\022\035\n\017other_arguments2\nTarguments\032\n\n\006handle\030\025\"\021\n\tpredicate\022\004func\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" 
\n\routput_shapes\022\013list(shape)(\0010\001\n\177\n\030FixedLengthRecordDataset\022\r\n\tfilenames\030\007\022\020\n\014header_bytes\030\t\022\020\n\014record_bytes\030\t\022\020\n\014footer_bytes\030\t\022\017\n\013buffer_size\030\t\032\n\n\006handle\030\025\210\001\001\n\267\001\n\016FlatMapDataset\022\021\n\rinput_dataset\030\025\022\035\n\017other_arguments2\nTarguments\032\n\n\006handle\030\025\"\t\n\001f\022\004func\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\212\003\n\020GeneratorDataset\022\'\n\024init_func_other_args2\017Tinit_func_args\022\'\n\024next_func_other_args2\017Tnext_func_args\022/\n\030finalize_func_other_args2\023Tfinalize_func_args\032\n\n\006handle\030\025\"\021\n\tinit_func\022\004func\"\021\n\tnext_func\022\004func\"\025\n\rfinalize_func\022\004func\"\037\n\017Tinit_func_args\022\nlist(type)(\001\"\037\n\017Tnext_func_args\022\nlist(type)(\001\"#\n\023Tfinalize_func_args\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\n\377\003\n\024GroupByWindowDataset\022\021\n\rinput_dataset\030\025\0225\n\030key_func_other_arguments2\031Tkey_func_other_arguments\022;\n\033reduce_func_other_arguments2\034Treduce_func_other_arguments\022E\n window_size_func_other_arguments2!Twindow_size_func_other_arguments\032\n\n\006handle\030\025\"\020\n\010key_func\022\004func\"\023\n\013reduce_func\022\004func\"\030\n\020window_size_func\022\004func\")\n\031Tkey_func_other_arguments\022\nlist(type)(\001\",\n\034Treduce_func_other_arguments\022\nlist(type)(\001\"1\n!Twindow_size_func_other_arguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" 
\n\routput_shapes\022\013list(shape)(\0010\001\n\336\001\n\021InterleaveDataset\022\021\n\rinput_dataset\030\025\022\035\n\017other_arguments2\nTarguments\022\020\n\014cycle_length\030\t\022\020\n\014block_length\030\t\032\n\n\006handle\030\025\"\t\n\001f\022\004func\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\207\001\n\010Iterator\032\n\n\006handle\030\024\"\025\n\013shared_name\022\006string\"\023\n\tcontainer\022\006string\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\n\213\001\n\030IteratorFromStringHandle\022\021\n\rstring_handle\030\007\032\023\n\017resource_handle\030\024\" \n\014output_types\022\nlist(type)\032\002\n\000(\001\"\"\n\routput_shapes\022\013list(shape)\032\002\n\000(\001\210\001\001\n\200\001\n\017IteratorGetNext\022\014\n\010iterator\030\024\032\032\n\ncomponents2\014output_types\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\n\204\001\n\023IteratorGetNextSync\022\014\n\010iterator\030\024\032\032\n\ncomponents2\014output_types\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\nQ\n\032IteratorSetStatsAggregator\022\023\n\017iterator_handle\030\024\022\033\n\027stats_aggregator_handle\030\024\210\001\001\nC\n\026IteratorToStringHandle\022\023\n\017resource_handle\030\024\032\021\n\rstring_handle\030\007\210\001\001\n\177\n\023LatencyStatsDataset\022\021\n\rinput_dataset\030\025\022\007\n\003tag\030\007\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" 
\n\routput_shapes\022\013list(shape)(\0010\001\n,\n\014MakeIterator\022\013\n\007dataset\030\025\022\014\n\010iterator\030\024\210\001\001\n\371\001\n\022MapAndBatchDataset\022\021\n\rinput_dataset\030\025\022\035\n\017other_arguments2\nTarguments\022\016\n\nbatch_size\030\t\022\030\n\024num_parallel_batches\030\t\022\022\n\016drop_remainder\030\n\032\n\n\006handle\030\025\"\t\n\001f\022\004func\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\263\001\n\nMapDataset\022\021\n\rinput_dataset\030\025\022\035\n\017other_arguments2\nTarguments\032\n\n\006handle\030\025\"\t\n\001f\022\004func\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\257\001\n\017OneShotIterator\032\n\n\006handle\030\024\"\027\n\017dataset_factory\022\004func\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n\313\001\n\022PaddedBatchDataset\022\021\n\rinput_dataset\030\025\022\016\n\nbatch_size\030\t\022\024\n\rpadded_shapes\030\t*\001N\022\037\n\016padding_values2\rToutput_types\032\n\n\006handle\030\025\"\037\n\rToutput_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\"\014\n\001N\022\003int(\0010\001\n\253\002\n\031ParallelInterleaveDataset\022\021\n\rinput_dataset\030\025\022\035\n\017other_arguments2\nTarguments\022\020\n\014cycle_length\030\t\022\020\n\014block_length\030\t\022\n\n\006sloppy\030\n\022\032\n\026buffer_output_elements\030\t\022\033\n\027prefetch_input_elements\030\t\032\n\n\006handle\030\025\"\t\n\001f\022\004func\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" 
\n\routput_shapes\022\013list(shape)(\0010\001\n\323\001\n\022ParallelMapDataset\022\021\n\rinput_dataset\030\025\022\035\n\017other_arguments2\nTarguments\022\026\n\022num_parallel_calls\030\003\032\n\n\006handle\030\025\"\t\n\001f\022\004func\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\203\001\n\017PrefetchDataset\022\021\n\rinput_dataset\030\025\022\017\n\013buffer_size\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\336\001\n%PrependFromQueueAndPaddedBatchDataset\022\021\n\rinput_dataset\030\025\022\016\n\nbatch_size\030\t\022\024\n\rpadded_shapes\030\t*\001N\022\037\n\016padding_values2\rToutput_types\032\n\n\006handle\030\025\"\037\n\rToutput_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\"\014\n\001N\022\003int(\0010\001\nu\n\rRandomDataset\022\010\n\004seed\030\t\022\t\n\005seed2\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\n~\n\014RangeDataset\022\t\n\005start\030\t\022\010\n\004stop\030\t\022\010\n\004step\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\n{\n\rRepeatDataset\022\021\n\rinput_dataset\030\025\022\t\n\005count\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\347\001\n\013ScanDataset\022\021\n\rinput_dataset\030\025\022\027\n\rinitial_state2\006Tstate\022\035\n\017other_arguments2\nTarguments\032\n\n\006handle\030\025\"\t\n\001f\022\004func\"\030\n\006Tstate\022\nlist(type)(\0010\001\"\032\n\nTarguments\022\nlist(type)(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" 
\n\routput_shapes\022\013list(shape)(\0010\001\n;\n\021SerializeIterator\022\023\n\017resource_handle\030\024\032\016\n\nserialized\030\025\210\001\001\n\253\001\n\027ShuffleAndRepeatDataset\022\021\n\rinput_dataset\030\025\022\017\n\013buffer_size\030\t\022\010\n\004seed\030\t\022\t\n\005seed2\030\t\022\t\n\005count\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\275\001\n\016ShuffleDataset\022\021\n\rinput_dataset\030\025\022\017\n\013buffer_size\030\t\022\010\n\004seed\030\t\022\t\n\005seed2\030\t\032\n\n\006handle\030\025\"$\n\030reshuffle_each_iteration\022\004bool\032\002(\001\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\ny\n\013SkipDataset\022\021\n\rinput_dataset\030\025\022\t\n\005count\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n\214\001\n\014SlideDataset\022\021\n\rinput_dataset\030\025\022\017\n\013window_size\030\t\022\n\n\006stride\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\nk\n\030SparseTensorSliceDataset\022\013\n\007indices\030\t\022\021\n\006values\"\007Tvalues\022\017\n\013dense_shape\030\t\032\n\n\006handle\030\025\"\017\n\007Tvalues\022\004type\210\001\001\n\217\001\n\nSqlDataset\022\017\n\013driver_name\030\007\022\024\n\020data_source_name\030\007\022\t\n\005query\030\007\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" 
\n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\nZ\n\025StatsAggregatorHandle\032\n\n\006handle\030\024\"\027\n\tcontainer\022\006string\032\002\022\000\"\031\n\013shared_name\022\006string\032\002\022\000\210\001\001\n6\n\026StatsAggregatorSummary\022\014\n\010iterator\030\024\032\013\n\007summary\030\007\210\001\001\nV\n\017TFRecordDataset\022\r\n\tfilenames\030\007\022\024\n\020compression_type\030\007\022\017\n\013buffer_size\030\t\032\n\n\006handle\030\025\210\001\001\ny\n\013TakeDataset\022\021\n\rinput_dataset\030\025\022\t\n\005count\030\t\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\n~\n\rTensorDataset\022\033\n\ncomponents2\rToutput_types\032\n\n\006handle\030\025\"\037\n\rToutput_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\n\203\001\n\022TensorSliceDataset\022\033\n\ncomponents2\rToutput_types\032\n\n\006handle\030\025\"\037\n\rToutput_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\210\001\001\nV\n\017TextLineDataset\022\r\n\tfilenames\030\007\022\024\n\020compression_type\030\007\022\017\n\013buffer_size\030\t\032\n\n\006handle\030\025\210\001\001\n\177\n\nZipDataset\022\025\n\016input_datasets\030\025*\001N\032\n\n\006handle\030\025\"\036\n\014output_types\022\nlist(type)(\0010\001\" \n\routput_shapes\022\013list(shape)(\0010\001\"\014\n\001N\022\003int(\0010\001"
# Persist the serialized op-definition bytes to disk.
# NOTE(review): `dataset_ops` is defined above; the payload looks like a
# serialized TensorFlow `OpList` protobuf (.pb) -- confirm before relying on it.
# A context manager guarantees the handle is flushed and closed even if the
# write raises, replacing the manual open/flush/close sequence which would
# leak the file handle on error.
with open(r"E:\github\fitzwang\serving\graph\src\main\resources\dataset_ops.pb", "wb") as fid:
    fid.write(dataset_ops)
| 5,496.647059
| 25,748
| 0.761662
| 18,802
| 93,443
| 3.745399
| 0.059887
| 0.039164
| 0.028741
| 0.051121
| 0.81852
| 0.799776
| 0.776274
| 0.755598
| 0.742861
| 0.733659
| 0
| 0.431106
| 0.001734
| 93,443
| 16
| 25,749
| 5,840.1875
| 0.323828
| 0
| 0
| 0
| 0
| 28.1
| 0.494708
| 0.484006
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
5e073e5203c38f8843b9959ee1334cd0328d41d1
| 7,560
|
py
|
Python
|
src/pymor/domaindescriptions/basic.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/domaindescriptions/basic.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/domaindescriptions/basic.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.domaindescriptions.boundarytypes import BoundaryType
from pymor.domaindescriptions.interfaces import DomainDescriptionInterface
class RectDomain(DomainDescriptionInterface):
    """A rectangular domain with a |BoundaryType| attached to each edge.

    Parameters
    ----------
    domain
        Two points: the lower-left and the upper-right corner of the
        rectangle.
    left
        The |BoundaryType| of the left edge.
    right
        The |BoundaryType| of the right edge.
    top
        The |BoundaryType| of the top edge.
    bottom
        The |BoundaryType| of the bottom edge.

    Attributes
    ----------
    domain
    left
    right
    top
    bottom
    """

    dim = 2

    def __init__(self, domain=([0, 0], [1, 1]), left=BoundaryType('dirichlet'), right=BoundaryType('dirichlet'),
                 top=BoundaryType('dirichlet'), bottom=BoundaryType('dirichlet')):
        # The first corner must not lie above or to the right of the second.
        assert domain[0][0] <= domain[1][0]
        assert domain[0][1] <= domain[1][1]
        # Each edge either carries a |BoundaryType| or is None.
        for edge in (left, right, top, bottom):
            assert edge is None or isinstance(edge, BoundaryType)
        self.boundary_types = frozenset({left, right, top, bottom})
        self.left = left
        self.right = right
        self.top = top
        self.bottom = bottom
        self.domain = np.array(domain)

    @property
    def lower_left(self):
        return self.domain[0]

    @property
    def upper_right(self):
        return self.domain[1]

    @property
    def width(self):
        return self.domain[1][0] - self.domain[0][0]

    @property
    def height(self):
        return self.domain[1][1] - self.domain[0][1]

    @property
    def volume(self):
        return self.height * self.width

    @property
    def diameter(self):
        w, h = self.width, self.height
        return np.sqrt(w ** 2 + h ** 2)

    def __repr__(self):
        default = BoundaryType('dirichlet')
        # Only mention edges that deviate from the 'dirichlet' default.
        edges = ''.join(', {}={}'.format(name, repr(getattr(self, name)))
                        for name in ('left', 'right', 'top', 'bottom')
                        if getattr(self, name) != default)
        return 'RectDomain({}{})'.format(str(self.domain).replace('\n', ','), edges)
class CylindricalDomain(DomainDescriptionInterface):
    """Describes a cylindrical domain.

    |BoundaryTypes| can be associated edgewise.

    Parameters
    ----------
    domain
        List of two points defining the lower-left and upper-right corner
        of the domain. The left and right edge are identified.
    top
        The |BoundaryType| of the top edge.
    bottom
        The |BoundaryType| of the bottom edge.

    Attributes
    ----------
    domain
    top
    bottom
    """

    dim = 2

    def __init__(self, domain=([0, 0], [1, 1]), top=BoundaryType('dirichlet'), bottom=BoundaryType('dirichlet')):
        # lower-left corner must not exceed upper-right corner coordinatewise
        assert domain[0][0] <= domain[1][0]
        assert domain[0][1] <= domain[1][1]
        assert top is None or isinstance(top, BoundaryType)
        assert bottom is None or isinstance(bottom, BoundaryType)
        # only top/bottom carry boundary types; left/right edges are identified
        self.boundary_types = frozenset({top, bottom})
        self.top = top
        self.bottom = bottom
        self.domain = np.array(domain)

    @property
    def lower_left(self):
        # first row of the 2x2 domain array
        return self.domain[0]

    @property
    def upper_right(self):
        # second row of the 2x2 domain array
        return self.domain[1]

    @property
    def width(self):
        return self.domain[1, 0] - self.domain[0, 0]

    @property
    def height(self):
        return self.domain[1, 1] - self.domain[0, 1]

    @property
    def volume(self):
        return self.width * self.height

    @property
    def diameter(self):
        # length of the rectangle's diagonal
        return np.sqrt(self.width ** 2 + self.height ** 2)

    def __repr__(self):
        # only mention boundary types that differ from the 'dirichlet' default
        top = ', top=' + repr(self.top) if self.top != BoundaryType('dirichlet') else ''
        bottom = ', bottom=' + repr(self.bottom) if self.bottom != BoundaryType('dirichlet') else ''
        return 'CylindricalDomain({}{})'.format(str(self.domain).replace('\n', ','), top + bottom)
class TorusDomain(DomainDescriptionInterface):
    """Describes a domain with the topology of a torus.

    Parameters
    ----------
    domain
        List of two points defining the lower-left and upper-right corner
        of the domain. The left and right edge are identified, as well as
        the bottom and top edge.

    Attributes
    ----------
    domain
    """

    dim = 2

    def __init__(self, domain=([0, 0], [1, 1])):
        assert domain[0][0] <= domain[1][0]
        assert domain[0][1] <= domain[1][1]
        # a torus has no boundary, hence no boundary types
        self.boundary_types = frozenset()
        self.domain = np.array(domain)

    @property
    def lower_left(self):
        return self.domain[0]

    @property
    def upper_right(self):
        return self.domain[1]

    @property
    def width(self):
        return self.upper_right[0] - self.lower_left[0]

    @property
    def height(self):
        return self.upper_right[1] - self.lower_left[1]

    @property
    def volume(self):
        return self.width * self.height

    @property
    def diameter(self):
        return np.sqrt(self.width ** 2 + self.height ** 2)

    def __repr__(self):
        interior = str(self.domain).replace('\n', ',')
        return 'TorusDomain({})'.format(interior)
class LineDomain(DomainDescriptionInterface):
    """A one-dimensional interval domain with a |BoundaryType| at each end.

    Parameters
    ----------
    domain
        List [x_l, x_r] providing the left and right endpoint.
    left
        The |BoundaryType| of the left endpoint.
    right
        The |BoundaryType| of the right endpoint.

    Attributes
    ----------
    domain
    left
    right
    """

    dim = 1

    def __init__(self, domain=(0, 1), left=BoundaryType('dirichlet'), right=BoundaryType('dirichlet')):
        assert domain[0] <= domain[1]
        for endpoint in (left, right):
            assert endpoint is None or isinstance(endpoint, BoundaryType)
        self.boundary_types = frozenset({left, right})
        self.left = left
        self.right = right
        self.domain = np.array(domain)

    @property
    def width(self):
        return self.domain[1] - self.domain[0]

    def __repr__(self):
        # only mention endpoints that differ from the 'dirichlet' default
        default = BoundaryType('dirichlet')
        ends = ''.join(', {}={!r}'.format(name, bt)
                       for name, bt in (('left', self.left), ('right', self.right))
                       if bt != default)
        return 'LineDomain({}{})'.format(self.domain, ends)
class CircleDomain(DomainDescriptionInterface):
    """Describes a domain with the topology of a circle, i.e. a line with
    identified end points.

    Parameters
    ----------
    domain
        List [x_l, x_r] providing the left and right endpoint.

    Attributes
    ----------
    domain
    """

    dim = 1

    def __init__(self, domain=(0, 1)):
        assert domain[0] <= domain[1]
        # Fix: like TorusDomain, a periodic domain has no boundary, so expose
        # an (empty) boundary_types attribute instead of leaving it unset.
        # Previously, generic code reading ``description.boundary_types``
        # would fail with AttributeError for CircleDomain.
        self.boundary_types = frozenset()
        self.domain = np.array(domain)

    @property
    def width(self):
        # length of the underlying interval
        return self.domain[1] - self.domain[0]

    def __repr__(self):
        return 'CircleDomain({})'.format(self.domain)
| 27.896679
| 113
| 0.612434
| 915
| 7,560
| 5.001093
| 0.122404
| 0.080857
| 0.05201
| 0.061189
| 0.807037
| 0.798514
| 0.755463
| 0.712194
| 0.702142
| 0.66674
| 0
| 0.017666
| 0.25873
| 7,560
| 270
| 114
| 28
| 0.798894
| 0.240079
| 0
| 0.823077
| 0
| 0
| 0.055125
| 0.00424
| 0
| 0
| 0
| 0
| 0.123077
| 1
| 0.230769
| false
| 0
| 0.023077
| 0.169231
| 0.523077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
5e38be8553ec4aef01c0ab8b1b427e14572475e3
| 179
|
py
|
Python
|
cms_timetravel/tests/__init__.py
|
jjanssen/django-cms-timetravel
|
2a55f929d873eb22af4e3c751f970ca070ae2f0e
|
[
"Apache-2.0"
] | null | null | null |
cms_timetravel/tests/__init__.py
|
jjanssen/django-cms-timetravel
|
2a55f929d873eb22af4e3c751f970ca070ae2f0e
|
[
"Apache-2.0"
] | null | null | null |
cms_timetravel/tests/__init__.py
|
jjanssen/django-cms-timetravel
|
2a55f929d873eb22af4e3c751f970ca070ae2f0e
|
[
"Apache-2.0"
] | null | null | null |
from cms_timetravel.tests.admin_views import *
from cms_timetravel.tests.managers import *
from cms_timetravel.tests.middleware import *
from cms_timetravel.tests.plugins import *
| 44.75
| 46
| 0.849162
| 25
| 179
| 5.88
| 0.4
| 0.190476
| 0.462585
| 0.598639
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083799
| 179
| 4
| 47
| 44.75
| 0.896341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
eaaef563aba90c025c39d8ed53c92c3be0212427
| 13,625
|
py
|
Python
|
modules/tests/photons_app_tests/helpers/test_queue.py
|
Djelibeybi/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 51
|
2020-07-03T08:34:48.000Z
|
2022-03-16T10:56:08.000Z
|
modules/tests/photons_app_tests/helpers/test_queue.py
|
delfick/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 81
|
2020-07-03T08:13:59.000Z
|
2022-03-31T23:02:54.000Z
|
modules/tests/photons_app_tests/helpers/test_queue.py
|
Djelibeybi/photons
|
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
|
[
"MIT"
] | 8
|
2020-07-24T23:48:20.000Z
|
2021-05-24T17:20:16.000Z
|
# coding: spec
from photons_app import helpers as hp
from queue import Queue as NormalQueue
from collections import deque
import asyncio
import pytest
@pytest.fixture()
def final_future():
    """Yield a fresh future, cancelling it on teardown."""
    future = hp.create_future()
    try:
        yield future
    finally:
        future.cancel()
# noseOfYeti "spec" DSL: ``describe``/``it`` compile to test classes/methods.
describe "Queue":
    # Tests for hp.Queue, the async queue tied to a final future.

    it "takes in a final_future", final_future:
        queue = hp.Queue(final_future)
        compare = pytest.helpers.child_future_of(final_future)
        assert queue.final_future == compare
        assert hp.fut_has_callback(queue.final_future, queue._stop_waiter)
        assert isinstance(queue.collection, deque)
        assert isinstance(queue.waiter, hp.ResettableFuture)
        assert not queue.waiter.done()

    async it "can stop the waiter on done", final_future:
        queue = hp.Queue(final_future)
        assert isinstance(queue.waiter, hp.ResettableFuture)
        assert not queue.waiter.done()
        final_future.cancel()
        await asyncio.sleep(0.001)
        assert queue.waiter.done()

        # And if the waiter was already done
        queue = hp.Queue(final_future)
        assert isinstance(queue.waiter, hp.ResettableFuture)
        queue.waiter.set_result(True)
        final_future.cancel()
        await asyncio.sleep(0.001)
        assert queue.waiter.done()

    async it "can get remaining items", final_future:
        queue = hp.Queue(final_future)
        assert not queue.waiter.done()
        queue.append(1)
        assert queue.waiter.done()
        queue.append(2)
        assert list(queue.remaining()) == [1, 2]
        assert not queue.collection

    describe "getting all results":

        async it "can get results until final_future is done", final_future:
            wait = hp.create_future()
            queue = hp.Queue(final_future)
            ff = hp.create_future()
            found = []

            # producer: first batch, pause on ``wait``, then second batch
            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    async for item in queue:
                        if item == 5:
                            final_future.cancel()
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
            finally:
                ff.cancel()

            # The queue will drop remaining items
            assert found == [1, 2, 3, 4, 5]
            assert list(queue.remaining()) == [6, 7]

        async it "ignores results added after final_future is done if still waiting for results", final_future:
            wait = hp.create_future()
            queue = hp.Queue(final_future)
            ff = hp.create_future()
            found = []

            # producer cancels final_future before appending the second batch
            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                final_future.cancel()
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    async for item in queue:
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
            finally:
                ff.cancel()

            # The queue will drop remaining items
            assert found == [1, 2, 3, 4]
            assert list(queue.remaining()) == [5, 6, 7]

        async it "is re-entrant if we break", final_future:
            found = []
            queue = hp.Queue(final_future)

            for i in range(10):
                queue.append(i)

            async for item in queue:
                found.append(item)
                if item == 3:
                    break

            assert found == [0, 1, 2, 3]

            # a second ``async for`` resumes where the first stopped
            async for item in queue:
                found.append(item)
                if item == 9:
                    final_future.cancel()

            assert found == list(range(10))

    describe "getting all results and empty_on_finished":

        async it "can get results until final_future is done", final_future:
            wait = hp.create_future()
            queue = hp.Queue(final_future, empty_on_finished=True)
            ff = hp.create_future()
            found = []

            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    async for item in queue:
                        if item == 5:
                            final_future.cancel()
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
            finally:
                ff.cancel()

            # The queue will not drop remaining items
            assert found == [1, 2, 3, 4, 5, 6, 7]
            assert list(queue.remaining()) == []

        async it "gets results added after final_future is done if still waiting for results", final_future:
            wait = hp.create_future()
            queue = hp.Queue(final_future, empty_on_finished=True)
            ff = hp.create_future()
            found = []

            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                final_future.cancel()
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    async for item in queue:
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
            finally:
                ff.cancel()

            # The queue will not drop remaining items
            assert found == [1, 2, 3, 4, 5, 6, 7]
            assert list(queue.remaining()) == []

        async it "is re-entrant if we break", final_future:
            found = []
            queue = hp.Queue(final_future, empty_on_finished=True)

            for i in range(10):
                queue.append(i)

            async for item in queue:
                found.append(item)
                if item == 3:
                    break

            assert found == [0, 1, 2, 3]

            async for item in queue:
                found.append(item)
                if item == 9:
                    final_future.cancel()

            assert found == list(range(10))
describe "SyncQueue":
    # Tests for hp.SyncQueue, the blocking (non-async) counterpart of hp.Queue.

    it "takes in a final_future", final_future:
        queue = hp.SyncQueue(final_future)
        compare = pytest.helpers.child_future_of(final_future)
        assert queue.final_future == compare
        assert queue.timeout == 0.05
        assert isinstance(queue.collection, NormalQueue)

        # a custom poll timeout may be supplied
        queue = hp.SyncQueue(final_future, timeout=1)
        assert queue.timeout == 1

    it "can append items", final_future:
        queue = hp.SyncQueue(final_future)
        queue.append(1)
        queue.append(2)

        found = []
        for item in queue:
            found.append(item)
            if item == 2:
                break

        assert found == [1, 2]

        queue.append(3)
        found = []
        for item in queue:
            found.append(item)
            final_future.cancel()

        assert found == [3]

    async it "can get remaining items", final_future:
        queue = hp.SyncQueue(final_future)
        queue.append(1)
        queue.append(2)
        assert list(queue.remaining()) == [1, 2]
        assert queue.collection.empty()

    describe "getting all results":

        async it "can get results until final_future is done", final_future:
            wait = hp.create_future()
            queue = hp.SyncQueue(final_future)
            ff = hp.create_future()
            found = []

            # producer: first batch, pause on ``wait``, then second batch
            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    for item in queue:
                        if item == 5:
                            final_future.cancel()
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
                        # yield control so the producer task can run
                        await asyncio.sleep(0.01)
            finally:
                ff.cancel()

            # The queue will drop remaining items
            assert found == [1, 2, 3, 4, 5]
            assert list(queue.remaining()) == [6, 7]

        async it "ignores results added after final_future is done if still waiting for results", final_future:
            wait = hp.create_future()
            queue = hp.SyncQueue(final_future)
            ff = hp.create_future()
            found = []

            # producer cancels final_future before appending the second batch
            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                final_future.cancel()
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    for item in queue:
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
                        await asyncio.sleep(0.01)
            finally:
                ff.cancel()

            # The queue will drop remaining items
            assert found == [1, 2, 3, 4]
            assert list(queue.remaining()) == [5, 6, 7]

        async it "is re-entrant if we break", final_future:
            found = []
            queue = hp.SyncQueue(final_future)

            for i in range(10):
                queue.append(i)

            for item in queue:
                found.append(item)
                if item == 3:
                    break

            assert found == [0, 1, 2, 3]

            # a second ``for`` loop resumes where the first stopped
            for item in queue:
                found.append(item)
                if item == 9:
                    final_future.cancel()

            assert found == list(range(10))

    describe "getting all results when empty_on_finished":

        async it "can get results until final_future is done", final_future:
            wait = hp.create_future()
            queue = hp.SyncQueue(final_future, empty_on_finished=True)
            ff = hp.create_future()
            found = []

            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    for item in queue:
                        if item == 5:
                            final_future.cancel()
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
                        await asyncio.sleep(0.01)
            finally:
                ff.cancel()

            # The queue will not drop remaining items
            assert found == [1, 2, 3, 4, 5, 6, 7]
            assert list(queue.remaining()) == []

        async it "gets results added after final_future is done if still waiting for results", final_future:
            wait = hp.create_future()
            queue = hp.SyncQueue(final_future, empty_on_finished=True)
            ff = hp.create_future()
            found = []

            async def fill():
                for i in (2, 3, 4):
                    queue.append(i)
                await wait
                final_future.cancel()
                for i in (5, 6, 7):
                    queue.append(i)

            try:
                async with hp.TaskHolder(ff) as ts:
                    ts.add(fill())
                    queue.append(1)
                    for item in queue:
                        found.append(item)
                        if item == 4:
                            wait.set_result(True)
                        await asyncio.sleep(0.01)
            finally:
                ff.cancel()

            # The queue will not drop remaining items
            assert found == [1, 2, 3, 4, 5, 6, 7]
            assert list(queue.remaining()) == []

        async it "is re-entrant if we break", final_future:
            found = []
            queue = hp.SyncQueue(final_future, empty_on_finished=True)

            for i in range(10):
                queue.append(i)

            for item in queue:
                found.append(item)
                if item == 3:
                    break

            assert found == [0, 1, 2, 3]

            for item in queue:
                found.append(item)
                if item == 9:
                    final_future.cancel()

            assert found == list(range(10))
| 27.636917
| 111
| 0.462752
| 1,495
| 13,625
| 4.137124
| 0.072241
| 0.122716
| 0.019402
| 0.040744
| 0.910914
| 0.894907
| 0.89329
| 0.891027
| 0.879386
| 0.873727
| 0
| 0.027196
| 0.449468
| 13,625
| 492
| 112
| 27.693089
| 0.79736
| 0.025395
| 0
| 0.913947
| 0
| 0
| 0.06331
| 0
| 0
| 0
| 0
| 0
| 0.136499
| 0
| null | null | 0
| 0.014837
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
eaafd174d2c06fe75312451e86aef2f3cc473292
| 72
|
py
|
Python
|
hello.py
|
EnfantT/build_and_test_examples
|
daf1d5aabe5830d9540ebd4b45818346136191e3
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
EnfantT/build_and_test_examples
|
daf1d5aabe5830d9540ebd4b45818346136191e3
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
EnfantT/build_and_test_examples
|
daf1d5aabe5830d9540ebd4b45818346136191e3
|
[
"Apache-2.0"
] | null | null | null |
# Print the greeting twice, exactly as before, via a loop.
for _ in range(2):
    print("Hello world from Travis CI")
| 24
| 35
| 0.75
| 12
| 72
| 4.5
| 0.5
| 0.37037
| 0.555556
| 0.703704
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 72
| 2
| 36
| 36
| 0.870968
| 0
| 0
| 1
| 0
| 0
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
eababb264fc432e74cb6b5b37bdb8e272c27855f
| 32,702
|
py
|
Python
|
petl/test/transform/test_intervals.py
|
OptionMetrics/petl
|
ee0a196f40c07218249be0d279b72e57d177a7fd
|
[
"MIT"
] | 495
|
2018-08-07T18:24:57.000Z
|
2022-03-31T14:57:57.000Z
|
petl/test/transform/test_intervals.py
|
OptionMetrics/petl
|
ee0a196f40c07218249be0d279b72e57d177a7fd
|
[
"MIT"
] | 204
|
2018-07-25T12:44:14.000Z
|
2022-03-28T07:52:54.000Z
|
petl/test/transform/test_intervals.py
|
OptionMetrics/petl
|
ee0a196f40c07218249be0d279b72e57d177a7fd
|
[
"MIT"
] | 88
|
2018-08-04T04:51:43.000Z
|
2022-01-17T01:05:27.000Z
|
from __future__ import absolute_import, print_function, division
import logging
import sys
import petl as etl
from petl.test.helpers import ieq, eq_
from petl.util.vis import lookall
from petl.errors import DuplicateKeyError
from petl.transform.intervals import intervallookup, intervallookupone, \
facetintervallookup, facetintervallookupone, intervaljoin, \
intervalleftjoin, intervaljoinvalues, intervalsubtract, \
collapsedintervals, _Interval, intervalantijoin
# Module-level logger; ``debug`` is a shorthand used by the tests below.
logger = logging.getLogger(__name__)
debug = logger.debug
try:
# noinspection PyUnresolvedReferences
import intervaltree
except ImportError as e:
print('SKIP interval tests: %s' % e, file=sys.stderr)
else:
def test_intervallookup():
    """Exercise intervallookup queries against a small fixed table."""
    table = (('start', 'stop', 'value'),
             (1, 4, 'foo'),
             (3, 7, 'bar'),
             (4, 9, 'baz'))
    lkp = intervallookup(table, 'start', 'stop')
    # (search args, expected rows) pairs, checked in order
    cases = [
        ((0, 1), []),
        ((1, 2), [(1, 4, 'foo')]),
        ((2, 4), [(1, 4, 'foo'), (3, 7, 'bar')]),
        ((2, 5), [(1, 4, 'foo'), (3, 7, 'bar'), (4, 9, 'baz')]),
        ((9, 14), []),
        ((19, 140), []),
        ((1,), [(1, 4, 'foo')]),
        ((2,), [(1, 4, 'foo')]),
        ((4,), [(3, 7, 'bar'), (4, 9, 'baz')]),
        ((5,), [(3, 7, 'bar'), (4, 9, 'baz')]),
    ]
    for args, expect in cases:
        eq_(expect, lkp.search(*args))
def test_intervallookup_include_stop():
    """intervallookup with include_stop=True treats intervals as closed."""
    table = (('start', 'stop', 'value'),
             (1, 4, 'foo'),
             (3, 7, 'bar'),
             (4, 9, None))
    lkp = intervallookup(table, 'start', 'stop', value='value',
                         include_stop=True)
    # (search args, expected values) pairs, checked in order
    cases = [
        ((0, 1), ['foo']),
        ((1, 2), ['foo']),
        ((2, 4), ['foo', 'bar', None]),
        ((2, 5), ['foo', 'bar', None]),
        ((9, 14), [None]),
        ((19, 140), []),
        ((1,), ['foo']),
        ((2,), ['foo']),
        ((4,), ['foo', 'bar', None]),
        ((5,), ['bar', None]),
    ]
    for args, expect in cases:
        eq_(expect, lkp.search(*args))
def test_intervallookupone():
    """intervallookupone is strict by default: >1 match raises."""
    table = (('start', 'stop', 'value'),
             (1, 4, 'foo'),
             (3, 7, 'bar'),
             (4, 9, 'baz'))
    lkp = intervallookupone(table, 'start', 'stop', value='value')
    RAISES = object()  # marker: the query must raise DuplicateKeyError
    cases = [
        ((0, 1), None),
        ((1, 2), 'foo'),
        ((2, 4), RAISES),
        ((2, 5), RAISES),
        ((4, 5), RAISES),
        ((5, 7), RAISES),
        ((8, 9), 'baz'),
        ((9, 14), None),
        ((19, 140), None),
        ((0,), None),
        ((1,), 'foo'),
        ((2,), 'foo'),
        ((4,), RAISES),
        ((5,), RAISES),
        ((8,), 'baz'),
    ]
    for args, expect in cases:
        if expect is RAISES:
            try:
                lkp.search(*args)
            except DuplicateKeyError:
                pass
            else:
                assert False, 'expected error'
        else:
            eq_(expect, lkp.search(*args))
def test_intervallookupone_not_strict():
    """With strict=False, intervallookupone returns the first match."""
    table = (('start', 'stop', 'value'),
             (1, 4, 'foo'),
             (3, 7, 'bar'),
             (4, 9, 'baz'))
    lkp = intervallookupone(table, 'start', 'stop', value='value',
                            strict=False)
    # (search args, expected value) pairs, checked in order
    cases = [
        ((0, 1), None),
        ((1, 2), 'foo'),
        ((2, 4), 'foo'),
        ((2, 5), 'foo'),
        ((4, 5), 'bar'),
        ((5, 7), 'bar'),
        ((8, 9), 'baz'),
        ((9, 14), None),
        ((19, 140), None),
        ((0,), None),
        ((1,), 'foo'),
        ((2,), 'foo'),
        ((4,), 'bar'),
        ((5,), 'bar'),
        ((8,), 'baz'),
    ]
    for args, expect in cases:
        eq_(expect, lkp.search(*args))
def test_facetintervallookup():
    """facetintervallookup builds one interval tree per facet key."""
    table = (('type', 'start', 'stop', 'value'),
             ('apple', 1, 4, 'foo'),
             ('apple', 3, 7, 'bar'),
             ('orange', 4, 9, 'baz'))
    lkp = facetintervallookup(table, key='type', start='start', stop='stop')
    # (facet key, search args, expected rows) triples, checked in order
    cases = [
        ('apple', (0, 1), []),
        ('apple', (1, 2), [('apple', 1, 4, 'foo')]),
        ('apple', (2, 4), [('apple', 1, 4, 'foo'), ('apple', 3, 7, 'bar')]),
        ('apple', (2, 5), [('apple', 1, 4, 'foo'), ('apple', 3, 7, 'bar')]),
        ('orange', (2, 5), [('orange', 4, 9, 'baz')]),
        ('orange', (9, 14), []),
        ('orange', (19, 140), []),
        ('apple', (0,), []),
        ('apple', (1,), [('apple', 1, 4, 'foo')]),
        ('apple', (2,), [('apple', 1, 4, 'foo')]),
        ('apple', (4,), [('apple', 3, 7, 'bar')]),
        ('apple', (5,), [('apple', 3, 7, 'bar')]),
        ('orange', (5,), [('orange', 4, 9, 'baz')]),
    ]
    for key, args, expect in cases:
        eq_(expect, lkp[key].search(*args))
def test_facetintervallookupone():
    """facetintervallookupone: one value per facet key, strict by default."""
    table = (('type', 'start', 'stop', 'value'),
             ('apple', 1, 4, 'foo'),
             ('apple', 3, 7, 'bar'),
             ('orange', 4, 9, 'baz'))
    lkp = facetintervallookupone(table, key='type', start='start',
                                 stop='stop', value='value')
    RAISES = object()  # marker: the query must raise DuplicateKeyError
    cases = [
        ('apple', (0, 1), None),
        ('apple', (1, 2), 'foo'),
        ('apple', (2, 4), RAISES),
        ('apple', (2, 5), RAISES),
        ('apple', (4, 5), 'bar'),
        ('orange', (4, 5), 'baz'),
        ('apple', (5, 7), 'bar'),
        ('orange', (5, 7), 'baz'),
        ('apple', (8, 9), None),
        ('orange', (8, 9), 'baz'),
        ('orange', (9, 14), None),
        ('orange', (19, 140), None),
        ('apple', (0,), None),
        ('apple', (1,), 'foo'),
        ('apple', (2,), 'foo'),
        ('apple', (4,), 'bar'),
        ('apple', (5,), 'bar'),
        ('orange', (5,), 'baz'),
        ('apple', (8,), None),
        ('orange', (8,), 'baz'),
    ]
    for key, args, expect in cases:
        if expect is RAISES:
            try:
                lkp[key].search(*args)
            except DuplicateKeyError:
                pass
            else:
                assert False, 'expected error'
        else:
            eq_(expect, lkp[key].search(*args))
def test_facetintervallookup_compound():
    """A facet key may be a tuple of fields."""
    table = (('type', 'variety', 'start', 'stop', 'value'),
             ('apple', 'cox', 1, 4, 'foo'),
             ('apple', 'fuji', 3, 7, 'bar'),
             ('orange', 'mandarin', 4, 9, 'baz'))
    lookup = facetintervallookup(table, key=('type', 'variety'), start='start',
                                 stop='stop')
    # both queries overlap only the ('apple', 'cox') interval
    expect = [('apple', 'cox', 1, 4, 'foo')]
    for query in ((1, 2), (2, 4)):
        eq_(expect, lookup['apple', 'cox'].search(*query))
def test_intervaljoin():
    """Inner interval join on overlapping [start, stop) ranges."""
    lhs = (('begin', 'end', 'quux'),
           (1, 2, 'a'),
           (2, 4, 'b'),
           (2, 5, 'c'),
           (9, 14, 'd'),
           (9, 140, 'e'),
           (1, 1, 'f'),
           (2, 2, 'g'),
           (4, 4, 'h'),
           (5, 5, 'i'),
           (1, 8, 'j'))
    rhs = (('start', 'stop', 'value'),
           (1, 4, 'foo'),
           (3, 7, 'bar'),
           (4, 9, 'baz'))
    expect = (('begin', 'end', 'quux', 'start', 'stop', 'value'),
              (1, 2, 'a', 1, 4, 'foo'),
              (2, 4, 'b', 1, 4, 'foo'),
              (2, 4, 'b', 3, 7, 'bar'),
              (2, 5, 'c', 1, 4, 'foo'),
              (2, 5, 'c', 3, 7, 'bar'),
              (2, 5, 'c', 4, 9, 'baz'),
              (1, 8, 'j', 1, 4, 'foo'),
              (1, 8, 'j', 3, 7, 'bar'),
              (1, 8, 'j', 4, 9, 'baz'))
    result = intervaljoin(lhs, rhs,
                          lstart='begin', lstop='end',
                          rstart='start', rstop='stop')
    debug(lookall(result))
    # iterate twice to check the result table is re-iterable
    ieq(expect, result)
    ieq(expect, result)
def test_intervaljoin_include_stop():
    """Interval join with include_stop=True: intervals are treated as closed,
    so touching endpoints (and zero-length intervals) also match."""
    left = (('begin', 'end', 'quux'),
            (1, 2, 'a'),
            (2, 4, 'b'),
            (2, 5, 'c'),
            (9, 14, 'd'),
            (9, 140, 'e'),
            (1, 1, 'f'),
            (2, 2, 'g'),
            (4, 4, 'h'),
            (5, 5, 'i'),
            (1, 8, 'j'))
    right = (('start', 'stop', 'value'),
             (1, 4, 'foo'),
             (3, 7, 'bar'),
             (4, 9, 'baz'))
    actual = intervaljoin(left, right,
                          lstart='begin', lstop='end',
                          rstart='start', rstop='stop',
                          include_stop=True)
    expect = (('begin', 'end', 'quux', 'start', 'stop', 'value'),
              (1, 2, 'a', 1, 4, 'foo'),
              (2, 4, 'b', 1, 4, 'foo'),
              (2, 4, 'b', 3, 7, 'bar'),
              (2, 4, 'b', 4, 9, 'baz'),
              (2, 5, 'c', 1, 4, 'foo'),
              (2, 5, 'c', 3, 7, 'bar'),
              (2, 5, 'c', 4, 9, 'baz'),
              (9, 14, 'd', 4, 9, 'baz'),
              (9, 140, 'e', 4, 9, 'baz'),
              (1, 1, 'f', 1, 4, 'foo'),
              (2, 2, 'g', 1, 4, 'foo'),
              (4, 4, 'h', 1, 4, 'foo'),
              (4, 4, 'h', 3, 7, 'bar'),
              (4, 4, 'h', 4, 9, 'baz'),
              (5, 5, 'i', 3, 7, 'bar'),
              (5, 5, 'i', 4, 9, 'baz'),
              (1, 8, 'j', 1, 4, 'foo'),
              (1, 8, 'j', 3, 7, 'bar'),
              (1, 8, 'j', 4, 9, 'baz'))
    # iterate twice to check the result table is re-iterable
    ieq(expect, actual)
    ieq(expect, actual)
def test_intervaljoin_prefixes():
    """Interval join with lprefix/rprefix: output field names are prefixed."""
    left = (('begin', 'end', 'quux'),
            (1, 2, 'a'),
            (2, 4, 'b'),
            (2, 5, 'c'),
            (9, 14, 'd'),
            (9, 140, 'e'),
            (1, 1, 'f'),
            (2, 2, 'g'),
            (4, 4, 'h'),
            (5, 5, 'i'),
            (1, 8, 'j'))
    right = (('start', 'stop', 'value'),
             (1, 4, 'foo'),
             (3, 7, 'bar'),
             (4, 9, 'baz'))
    actual = intervaljoin(left, right,
                          lstart='begin', lstop='end',
                          rstart='start', rstop='stop',
                          lprefix='l_', rprefix='r_')
    expect = (('l_begin', 'l_end', 'l_quux', 'r_start', 'r_stop', 'r_value'),
              (1, 2, 'a', 1, 4, 'foo'),
              (2, 4, 'b', 1, 4, 'foo'),
              (2, 4, 'b', 3, 7, 'bar'),
              (2, 5, 'c', 1, 4, 'foo'),
              (2, 5, 'c', 3, 7, 'bar'),
              (2, 5, 'c', 4, 9, 'baz'),
              (1, 8, 'j', 1, 4, 'foo'),
              (1, 8, 'j', 3, 7, 'bar'),
              (1, 8, 'j', 4, 9, 'baz'))
    # iterate twice to check the result table is re-iterable
    ieq(expect, actual)
    ieq(expect, actual)
def test_intervalleftjoin():
    """Left interval join: unmatched left rows are kept, padded with None."""
    left = (('begin', 'end', 'quux'),
            (1, 2, 'a'),
            (2, 4, 'b'),
            (2, 5, 'c'),
            (9, 14, 'd'),
            (9, 140, 'e'),
            (1, 1, 'f'),
            (2, 2, 'g'),
            (4, 4, 'h'),
            (5, 5, 'i'),
            (1, 8, 'j'))
    right = (('start', 'stop', 'value'),
             (1, 4, 'foo'),
             (3, 7, 'bar'),
             (4, 9, 'baz'))
    actual = intervalleftjoin(left, right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop')
    expect = (('begin', 'end', 'quux', 'start', 'stop', 'value'),
              (1, 2, 'a', 1, 4, 'foo'),
              (2, 4, 'b', 1, 4, 'foo'),
              (2, 4, 'b', 3, 7, 'bar'),
              (2, 5, 'c', 1, 4, 'foo'),
              (2, 5, 'c', 3, 7, 'bar'),
              (2, 5, 'c', 4, 9, 'baz'),
              (9, 14, 'd', None, None, None),
              (9, 140, 'e', None, None, None),
              (1, 1, 'f', None, None, None),
              (2, 2, 'g', None, None, None),
              (4, 4, 'h', None, None, None),
              (5, 5, 'i', None, None, None),
              (1, 8, 'j', 1, 4, 'foo'),
              (1, 8, 'j', 3, 7, 'bar'),
              (1, 8, 'j', 4, 9, 'baz'))
    # iterate twice to check the result table is re-iterable
    ieq(expect, actual)
    ieq(expect, actual)
def test_intervaljoin_faceted():
    """Interval join faceted by a key field (lkey/rkey): rows only join
    when their keys match as well as their intervals overlapping."""
    left = (('fruit', 'begin', 'end'),
            ('apple', 1, 2),
            ('apple', 2, 4),
            ('apple', 2, 5),
            ('orange', 2, 5),
            ('orange', 9, 14),
            ('orange', 19, 140),
            ('apple', 1, 1),
            ('apple', 2, 2),
            ('apple', 4, 4),
            ('apple', 5, 5),
            ('orange', 5, 5))
    right = (('type', 'start', 'stop', 'value'),
             ('apple', 1, 4, 'foo'),
             ('apple', 3, 7, 'bar'),
             ('orange', 4, 9, 'baz'))
    expect = (('fruit', 'begin', 'end', 'type', 'start', 'stop', 'value'),
              ('apple', 1, 2, 'apple', 1, 4, 'foo'),
              ('apple', 2, 4, 'apple', 1, 4, 'foo'),
              ('apple', 2, 4, 'apple', 3, 7, 'bar'),
              ('apple', 2, 5, 'apple', 1, 4, 'foo'),
              ('apple', 2, 5, 'apple', 3, 7, 'bar'),
              ('orange', 2, 5, 'orange', 4, 9, 'baz'))
    actual = intervaljoin(left, right, lstart='begin', lstop='end',
                          rstart='start', rstop='stop', lkey='fruit',
                          rkey='type')
    # iterate twice to check the result table is re-iterable
    ieq(expect, actual)
    ieq(expect, actual)
def test_intervalleftjoin_faceted():
    """Faceted left interval join: unmatched left rows padded with None."""
    left = (('fruit', 'begin', 'end'),
            ('apple', 1, 2),
            ('apple', 2, 4),
            ('apple', 2, 5),
            ('orange', 2, 5),
            ('orange', 9, 14),
            ('orange', 19, 140),
            ('apple', 1, 1),
            ('apple', 2, 2),
            ('apple', 4, 4),
            ('apple', 5, 5),
            ('orange', 5, 5))
    right = (('type', 'start', 'stop', 'value'),
             ('apple', 1, 4, 'foo'),
             ('apple', 3, 7, 'bar'),
             ('orange', 4, 9, 'baz'))
    expect = (('fruit', 'begin', 'end', 'type', 'start', 'stop', 'value'),
              ('apple', 1, 2, 'apple', 1, 4, 'foo'),
              ('apple', 2, 4, 'apple', 1, 4, 'foo'),
              ('apple', 2, 4, 'apple', 3, 7, 'bar'),
              ('apple', 2, 5, 'apple', 1, 4, 'foo'),
              ('apple', 2, 5, 'apple', 3, 7, 'bar'),
              ('orange', 2, 5, 'orange', 4, 9, 'baz'),
              ('orange', 9, 14, None, None, None, None),
              ('orange', 19, 140, None, None, None, None),
              ('apple', 1, 1, None, None, None, None),
              ('apple', 2, 2, None, None, None, None),
              ('apple', 4, 4, None, None, None, None),
              ('apple', 5, 5, None, None, None, None),
              ('orange', 5, 5, None, None, None, None))
    actual = intervalleftjoin(left, right, lstart='begin', lstop='end',
                              rstart='start', rstop='stop', lkey='fruit',
                              rkey='type')
    # iterate twice to check the result table is re-iterable
    ieq(expect, actual)
    ieq(expect, actual)
def test_intervalleftjoin_faceted_rkeymissing():
    """Faceted interval left join where a facet value ('orange') has
    no rows at all on the right side."""
    table_left = (('fruit', 'begin', 'end'),
                  ('apple', 1, 2),
                  ('orange', 5, 5))
    table_right = (('type', 'start', 'stop', 'value'),
                   ('apple', 1, 4, 'foo'))
    expected = (('fruit', 'begin', 'end', 'type', 'start', 'stop', 'value'),
                ('apple', 1, 2, 'apple', 1, 4, 'foo'),
                ('orange', 5, 5, None, None, None, None))
    result = intervalleftjoin(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop',
                              lkey='fruit', rkey='type')
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_intervaljoins_faceted_compound():
    """Faceted interval joins keyed on a compound (two-field) facet."""
    table_left = (('fruit', 'sort', 'begin', 'end'),
                  ('apple', 'cox', 1, 2),
                  ('apple', 'fuji', 2, 4))
    table_right = (('type', 'variety', 'start', 'stop', 'value'),
                   ('apple', 'cox', 1, 4, 'foo'),
                   ('apple', 'fuji', 3, 7, 'bar'),
                   ('orange', 'mandarin', 4, 9, 'baz'))
    expected = (('fruit', 'sort', 'begin', 'end', 'type', 'variety', 'start',
                 'stop', 'value'),
                ('apple', 'cox', 1, 2, 'apple', 'cox', 1, 4, 'foo'),
                ('apple', 'fuji', 2, 4, 'apple', 'fuji', 3, 7, 'bar'))
    result = intervaljoin(table_left, table_right,
                          lstart='begin', lstop='end',
                          rstart='start', rstop='stop',
                          lkey=('fruit', 'sort'),
                          rkey=('type', 'variety'))
    ieq(expected, result)
    ieq(expected, result)  # re-iterable
    # Every left row matches here, so the left join yields the same rows.
    result = intervalleftjoin(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop',
                              lkey=('fruit', 'sort'),
                              rkey=('type', 'variety'))
    ieq(expected, result)
    ieq(expected, result)  # re-iterable
def test_intervalleftjoin_prefixes():
    """Interval left join with lprefix/rprefix applied to the output
    header fields."""
    table_left = (('begin', 'end', 'quux'),
                  (1, 2, 'a'),
                  (2, 4, 'b'),
                  (2, 5, 'c'),
                  (9, 14, 'd'),
                  (9, 140, 'e'),
                  (1, 1, 'f'),
                  (2, 2, 'g'),
                  (4, 4, 'h'),
                  (5, 5, 'i'),
                  (1, 8, 'j'))
    table_right = (('start', 'stop', 'value'),
                   (1, 4, 'foo'),
                   (3, 7, 'bar'),
                   (4, 9, 'baz'))
    result = intervalleftjoin(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop',
                              lprefix='l_', rprefix='r_')
    expected = (('l_begin', 'l_end', 'l_quux', 'r_start', 'r_stop', 'r_value'),
                (1, 2, 'a', 1, 4, 'foo'),
                (2, 4, 'b', 1, 4, 'foo'),
                (2, 4, 'b', 3, 7, 'bar'),
                (2, 5, 'c', 1, 4, 'foo'),
                (2, 5, 'c', 3, 7, 'bar'),
                (2, 5, 'c', 4, 9, 'baz'),
                (9, 14, 'd', None, None, None),
                (9, 140, 'e', None, None, None),
                (1, 1, 'f', None, None, None),
                (2, 2, 'g', None, None, None),
                (4, 4, 'h', None, None, None),
                (5, 5, 'i', None, None, None),
                (1, 8, 'j', 1, 4, 'foo'),
                (1, 8, 'j', 3, 7, 'bar'),
                (1, 8, 'j', 4, 9, 'baz'))
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_intervalantijoin():
    """Interval anti-join keeps only left rows that overlap nothing on
    the right (stop bounds exclusive by default)."""
    table_left = (('begin', 'end', 'quux'),
                  (1, 2, 'a'),
                  (2, 4, 'b'),
                  (2, 5, 'c'),
                  (9, 14, 'd'),
                  (9, 140, 'e'),
                  (1, 1, 'f'),
                  (2, 2, 'g'),
                  (4, 4, 'h'),
                  (5, 5, 'i'),
                  (1, 8, 'j'))
    table_right = (('start', 'stop', 'value'),
                   (1, 4, 'foo'),
                   (3, 7, 'bar'),
                   (4, 9, 'baz'))
    result = intervalantijoin(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop')
    expected = (('begin', 'end', 'quux'),
                (9, 14, 'd'),
                (9, 140, 'e'),
                (1, 1, 'f'),
                (2, 2, 'g'),
                (4, 4, 'h'),
                (5, 5, 'i'))
    debug(lookall(result))
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_intervalantijoin_include_stop():
    """Interval anti-join with include_stop=True: touching endpoints
    count as overlap, so far fewer rows survive."""
    table_left = (('begin', 'end', 'quux'),
                  (1, 2, 'a'),
                  (2, 4, 'b'),
                  (2, 5, 'c'),
                  (9, 14, 'd'),
                  (9, 140, 'e'),
                  (10, 140, 'e'),
                  (1, 1, 'f'),
                  (2, 2, 'g'),
                  (4, 4, 'h'),
                  (5, 5, 'i'),
                  (1, 8, 'j'))
    table_right = (('start', 'stop', 'value'),
                   (1, 4, 'foo'),
                   (3, 7, 'bar'),
                   (4, 9, 'baz'))
    result = intervalantijoin(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop',
                              include_stop=True)
    expected = (('begin', 'end', 'quux'),
                (10, 140, 'e'))
    debug(lookall(result))
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_intervalantijoin_faceted():
    """Faceted interval anti-join: overlap only counts against right
    rows with a matching facet key."""
    table_left = (('fruit', 'begin', 'end'),
                  ('apple', 1, 2),
                  ('apple', 2, 4),
                  ('apple', 2, 5),
                  ('orange', 2, 5),
                  ('orange', 9, 14),
                  ('orange', 19, 140),
                  ('apple', 1, 1),
                  ('apple', 2, 2),
                  ('apple', 4, 4),
                  ('apple', 5, 5),
                  ('orange', 5, 5))
    table_right = (('type', 'start', 'stop', 'value'),
                   ('apple', 1, 4, 'foo'),
                   ('apple', 3, 7, 'bar'),
                   ('orange', 4, 9, 'baz'))
    expected = (('fruit', 'begin', 'end'),
                ('orange', 9, 14),
                ('orange', 19, 140),
                ('apple', 1, 1),
                ('apple', 2, 2),
                ('apple', 4, 4),
                ('apple', 5, 5),
                ('orange', 5, 5))
    result = intervalantijoin(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop',
                              lkey='fruit', rkey='type')
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_intervaljoinvalues_faceted():
    """Faceted intervaljoinvalues: each left row gains a list of the
    'value' fields from overlapping right rows (empty if none)."""
    table_left = (('fruit', 'begin', 'end'),
                  ('apple', 1, 2),
                  ('apple', 2, 4),
                  ('apple', 2, 5),
                  ('orange', 2, 5),
                  ('orange', 9, 14),
                  ('orange', 19, 140),
                  ('apple', 1, 1),
                  ('apple', 2, 2),
                  ('apple', 4, 4),
                  ('apple', 5, 5),
                  ('orange', 5, 5))
    table_right = (('type', 'start', 'stop', 'value'),
                   ('apple', 1, 4, 'foo'),
                   ('apple', 3, 7, 'bar'),
                   ('orange', 4, 9, 'baz'))
    expected = (('fruit', 'begin', 'end', 'value'),
                ('apple', 1, 2, ['foo']),
                ('apple', 2, 4, ['foo', 'bar']),
                ('apple', 2, 5, ['foo', 'bar']),
                ('orange', 2, 5, ['baz']),
                ('orange', 9, 14, []),
                ('orange', 19, 140, []),
                ('apple', 1, 1, []),
                ('apple', 2, 2, []),
                ('apple', 4, 4, []),
                ('apple', 5, 5, []),
                ('orange', 5, 5, []))
    result = intervaljoinvalues(table_left, table_right,
                                lstart='begin', lstop='end',
                                rstart='start', rstop='stop',
                                lkey='fruit', rkey='type', value='value')
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_subtract_1():
    """Subtracting one right interval splits an overlapping left
    interval into the pieces either side of it."""
    table_left = (('begin', 'end', 'label'),
                  (1, 6, 'apple'),
                  (3, 6, 'orange'),
                  (5, 9, 'banana'))
    table_right = (('start', 'stop', 'foo'),
                   (3, 4, True))
    expected = (('begin', 'end', 'label'),
                (1, 3, 'apple'),
                (4, 6, 'apple'),
                (4, 6, 'orange'),
                (5, 9, 'banana'))
    result = intervalsubtract(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop')
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_subtract_2():
    """Subtracting two right intervals carves multiple gaps out of the
    left intervals."""
    table_left = (('begin', 'end', 'label'),
                  (1, 6, 'apple'),
                  (3, 6, 'orange'),
                  (5, 9, 'banana'))
    table_right = (('start', 'stop', 'foo'),
                   (3, 4, True),
                   (5, 6, True))
    expected = (('begin', 'end', 'label'),
                (1, 3, 'apple'),
                (4, 5, 'apple'),
                (4, 5, 'orange'),
                (6, 9, 'banana'))
    result = intervalsubtract(table_left, table_right,
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop')
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_subtract_faceted():
    """Faceted interval subtract: right intervals only cut left rows
    with a matching facet key ('region' vs 'place')."""
    table_left = (('region', 'begin', 'end', 'label'),
                  ('north', 1, 6, 'apple'),
                  ('south', 3, 6, 'orange'),
                  ('west', 5, 9, 'banana'))
    table_right = (('place', 'start', 'stop', 'foo'),
                   ('south', 3, 4, True),
                   ('north', 5, 6, True))
    expected = (('region', 'begin', 'end', 'label'),
                ('north', 1, 5, 'apple'),
                ('south', 4, 6, 'orange'),
                ('west', 5, 9, 'banana'))
    result = intervalsubtract(table_left, table_right,
                              lkey='region', rkey='place',
                              lstart='begin', lstop='end',
                              rstart='start', rstop='stop')
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
def test_collapse():
    """collapsedintervals merges overlapping intervals, optionally per
    facet key."""
    # Without a facet key the result is a sequence of _Interval pairs.
    table = (('begin', 'end', 'label'),
             (1, 6, 'apple'),
             (3, 6, 'orange'),
             (5, 9, 'banana'),
             (12, 14, 'banana'),
             (13, 17, 'kiwi'))
    expected = [_Interval(1, 9), _Interval(12, 17)]
    result = collapsedintervals(table, start='begin', stop='end')
    ieq(expected, result)
    # With a facet key each merged interval is prefixed by its key.
    table = (('region', 'begin', 'end', 'label'),
             ('north', 1, 6, 'apple'),
             ('north', 3, 6, 'orange'),
             ('north', 5, 9, 'banana'),
             ('south', 12, 14, 'banana'),
             ('south', 13, 17, 'kiwi'))
    expected = [('north', 1, 9), ('south', 12, 17)]
    result = collapsedintervals(table, start='begin', stop='end',
                                key='region')
    ieq(expected, result)
def test_integration():
    """intervaljoin is also available via the fluent etl.wrap API."""
    table_left = etl.wrap((('begin', 'end', 'quux'),
                           (1, 2, 'a'),
                           (2, 4, 'b'),
                           (2, 5, 'c'),
                           (9, 14, 'd'),
                           (9, 140, 'e'),
                           (1, 1, 'f'),
                           (2, 2, 'g'),
                           (4, 4, 'h'),
                           (5, 5, 'i'),
                           (1, 8, 'j')))
    table_right = etl.wrap((('start', 'stop', 'value'),
                            (1, 4, 'foo'),
                            (3, 7, 'bar'),
                            (4, 9, 'baz')))
    result = table_left.intervaljoin(table_right,
                                     lstart='begin', lstop='end',
                                     rstart='start', rstop='stop')
    expected = (('begin', 'end', 'quux', 'start', 'stop', 'value'),
                (1, 2, 'a', 1, 4, 'foo'),
                (2, 4, 'b', 1, 4, 'foo'),
                (2, 4, 'b', 3, 7, 'bar'),
                (2, 5, 'c', 1, 4, 'foo'),
                (2, 5, 'c', 3, 7, 'bar'),
                (2, 5, 'c', 4, 9, 'baz'),
                (1, 8, 'j', 1, 4, 'foo'),
                (1, 8, 'j', 3, 7, 'bar'),
                (1, 8, 'j', 4, 9, 'baz'))
    ieq(expected, result)
    ieq(expected, result)  # second pass checks the table is re-iterable
| 30.997156
| 81
| 0.370436
| 3,339
| 32,702
| 3.581012
| 0.043127
| 0.115414
| 0.090156
| 0.112068
| 0.891612
| 0.863929
| 0.849795
| 0.833152
| 0.817931
| 0.760057
| 0
| 0.063248
| 0.440126
| 32,702
| 1,054
| 82
| 31.026565
| 0.589819
| 0.001712
| 0
| 0.809412
| 0
| 0
| 0.115189
| 0
| 0
| 0
| 0
| 0
| 0.009412
| 1
| 0.029412
| false
| 0.009412
| 0.011765
| 0
| 0.041176
| 0.002353
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d82d73648eecdf614cc2a52e1476871c354a96be
| 4,446
|
py
|
Python
|
tslib/readers/tests/pi_xml_reader_tests.py
|
nens/tslib
|
e568436d6a4edf56608c96efe646ed2352274546
|
[
"MIT"
] | 1
|
2015-10-27T20:26:59.000Z
|
2015-10-27T20:26:59.000Z
|
tslib/readers/tests/pi_xml_reader_tests.py
|
nens/tslib
|
e568436d6a4edf56608c96efe646ed2352274546
|
[
"MIT"
] | 2
|
2018-01-11T09:49:43.000Z
|
2018-07-24T09:39:14.000Z
|
tslib/readers/tests/pi_xml_reader_tests.py
|
nens/tslib
|
e568436d6a4edf56608c96efe646ed2352274546
|
[
"MIT"
] | null | null | null |
import os
import unittest
from tslib.readers import PiXmlReader
# Directory of XML fixture files, located next to this test module.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
class TestPiXmlReader(unittest.TestCase):
    """Smoke tests for PiXmlReader.get_series() and get_tz()."""

    def test_parse_pi_xml_01(self):
        """Parse a file."""
        source = os.path.join(DATA_DIR, "time_series.xml")
        reader = PiXmlReader(source)
        # Exhausting the generator without an exception is the pass
        # criterion for this smoke test.
        for _metadata, _frame in reader.get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_02(self):
        """Parse a file having comment elements."""
        source = os.path.join(DATA_DIR, "GDresults_dam.xml")
        reader = PiXmlReader(source)
        for _metadata, _frame in reader.get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_03(self):
        """Parse a file with timeZone element."""
        source = os.path.join(DATA_DIR, "time_series.xml")
        reader = PiXmlReader(source)
        self.assertEqual(1.0, reader.get_tz())

    def test_parse_pi_xml_04(self):
        """Parse a file with empty timeZone element."""
        source = os.path.join(DATA_DIR, "empty_tz.xml")
        reader = PiXmlReader(source)
        self.assertEqual(0.0, reader.get_tz())

    def test_parse_pi_xml_05(self):
        """Parse a file without timeZone element."""
        source = os.path.join(DATA_DIR, "no_tz.xml")
        reader = PiXmlReader(source)
        self.assertEqual(None, reader.get_tz())

    def test_parse_pi_xml_06(self):
        """Parse a file without events ."""
        source = os.path.join(DATA_DIR, "no_events.xml")
        reader = PiXmlReader(source)
        # A series with no events should yield no dataframe.
        for _metadata, frame in reader.get_series():
            self.assertEqual(None, frame)

    def test_parse_pi_xml_07(self):
        """Parse a file."""
        source = os.path.join(DATA_DIR, "time_series.xml")
        reader = PiXmlReader(source)
        for _metadata, _frame in reader.get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_08(self):
        """Parse a file having comment elements."""
        source = os.path.join(DATA_DIR, "GDresults_dam.xml")
        reader = PiXmlReader(source)
        for _metadata, _frame in reader.get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_09(self):
        """Parse a file without events ."""
        source = os.path.join(DATA_DIR, "no_events.xml")
        reader = PiXmlReader(source)
        for _metadata, frame in reader.get_series():
            self.assertEqual(None, frame)
class BulkTestPiXmlReader(unittest.TestCase):
    """Smoke tests for PiXmlReader.bulk_get_series() chunked reading."""

    def test_parse_pi_xml_01(self):
        """Parse a file."""
        source = os.path.join(DATA_DIR, "time_series.xml")
        reader = PiXmlReader(source)
        # Exhausting the chunked generator without an exception is the
        # pass criterion for this smoke test.
        for _metadata, _frame in reader.bulk_get_series(chunk_size=5):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_02(self):
        """Parse a file having comment elements."""
        source = os.path.join(DATA_DIR, "GDresults_dam.xml")
        reader = PiXmlReader(source)
        for _metadata, _frame in reader.bulk_get_series(chunk_size=5):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_03(self):
        """Parse a file with timeZone element."""
        source = os.path.join(DATA_DIR, "time_series.xml")
        reader = PiXmlReader(source)
        self.assertEqual(1.0, reader.get_tz())

    def test_parse_pi_xml_06(self):
        """Parse a file without events ."""
        source = os.path.join(DATA_DIR, "no_events.xml")
        reader = PiXmlReader(source)
        for _metadata, frame in reader.bulk_get_series(chunk_size=5):
            self.assertEqual(None, frame)

    def test_parse_pi_xml_07(self):
        """Parse a file."""
        source = os.path.join(DATA_DIR, "time_series.xml")
        reader = PiXmlReader(source)
        # Chunk size larger than the file exercises the single-chunk path.
        for _metadata, _frame in reader.bulk_get_series(chunk_size=300):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_08(self):
        """Parse a file having comment elements."""
        source = os.path.join(DATA_DIR, "GDresults_dam.xml")
        reader = PiXmlReader(source)
        for _metadata, _frame in reader.bulk_get_series(chunk_size=5):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_09(self):
        """Parse a file without events ."""
        source = os.path.join(DATA_DIR, "no_events.xml")
        reader = PiXmlReader(source)
        for _metadata, frame in reader.bulk_get_series(chunk_size=5):
            self.assertEqual(None, frame)
| 33.428571
| 61
| 0.617409
| 603
| 4,446
| 4.338308
| 0.111111
| 0.041284
| 0.064985
| 0.085627
| 0.93922
| 0.924312
| 0.924312
| 0.915902
| 0.88685
| 0.88685
| 0
| 0.014115
| 0.266982
| 4,446
| 132
| 62
| 33.681818
| 0.788585
| 0.107962
| 0
| 0.861702
| 0
| 0
| 0.060489
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 1
| 0.170213
| false
| 0.085106
| 0.031915
| 0
| 0.223404
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
dc7c50b78b1f22b38da8ff79b142e1fa1708ed36
| 3,027
|
py
|
Python
|
app/Search/cypher_queries.py
|
psyphore/flask-phone-book
|
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
|
[
"MIT"
] | null | null | null |
app/Search/cypher_queries.py
|
psyphore/flask-phone-book
|
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
|
[
"MIT"
] | 2
|
2021-03-19T03:39:56.000Z
|
2021-06-08T20:28:03.000Z
|
app/Search/cypher_queries.py
|
psyphore/flask-phone-book
|
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
|
[
"MIT"
] | null | null | null |
def filter_person_query(name, skip, first):
    """Build a Cypher query matching Person nodes whose firstname,
    lastname, title or email matches *name* (case-insensitive), returning
    a projected person map, paginated by *skip*/*first* (as strings).

    NOTE(review): *name*, *skip* and *first* are spliced verbatim into
    the query text, so this is vulnerable to Cypher injection and regex
    metacharacter surprises if fed untrusted input; driver-side query
    parameters would be safer, but the current return contract is a
    single finished query string.
    """
    query = '''
    OPTIONAL MATCH (p:Person)
    WHERE p.firstname =~ '(?i){name}.*'
    OR p.lastname =~ '(?i){name}.*'
    OR p.title =~ '(?i).*{name}.*'
    OR p.email =~ '(?i).*{name}.*'
    RETURN p {
      .firstname,
      .mobile,
      .bio,
      .id,
      .title,
      .email,
      .lastname,
      .avatar,
      .knownAs,
      manager: apoc.cypher.runFirstColumn("MATCH (m)-[:MANAGES]->(this) RETURN m LIMIT 1", {this: p}, false),
      team: [(p)<-[:MANAGES]-()-[:MANAGES]->(t) | t],
      line: [(s)<-[:MANAGES]-(p) | s],
      products: [(p)-[:KNOWS]->(pr) | pr],
      building: [(p)-[:BASED_IN]->(b) | b]
    } AS person
    ORDER BY person.lastname ASC, person.firstname ASC
    SKIP {skip}
    LIMIT {first}
    '''
    # Literal {braces} elsewhere in the query must survive, so plain
    # str.replace is used rather than str.format.
    for placeholder, value in (('{name}', name),
                               ('{skip}', skip),
                               ('{first}', first)):
        query = query.replace(placeholder, value)
    return query
def filter_person_query_2(name, skip, first):
    """Build an extended Cypher person search: besides name/title regex
    matches it also matches full "first last" / "last, first" forms and
    related Building/Product names, returning a projected person map.

    NOTE(review): *name*, *skip* and *first* are spliced verbatim into
    the query text (Cypher injection risk with untrusted input); the
    return contract is a single finished query string, so driver-side
    parameters cannot be used here without changing callers.
    """
    query = '''
    WITH '{name}' AS query
    OPTIONAL MATCH (p:Person), (b:Building), (pr:Product)
    WHERE (p.title =~ '(?i).*{name}.*'
    OR p.firstname =~ '(?i){name}.*'
    OR p.lastname =~ '(?i){name}.*'
    OR query CONTAINS " " AND (toLower(query) = toLower(p.firstname) + " " + toLower(p.lastname))
    OR query CONTAINS ", " AND (toLower(query) = toLower(p.lastname) + ", " + toLower(p.firstname))
    OR ((p)--(b) AND (toLower(b.name) CONTAINS toLower(query) OR toLower(b.address) CONTAINS toLower(query)))
    OR ((p)--(pr) AND (toLower(pr.name) CONTAINS toLower(query))))
    RETURN p {
      .firstname,
      .mobile,
      .bio,
      .id,
      .title,
      .email,
      .lastname,
      .avatar,
      .knownAs,
      manager: apoc.cypher.runFirstColumn("MATCH (m)-[:MANAGES]->(this) RETURN m LIMIT 1", {this: p}, false),
      team: [(p)<-[:MANAGES]-()-[:MANAGES]->(t) | t],
      line: [(s)<-[:MANAGES]-(p) | s],
      products: [(p)-[:KNOWS]->(pr) | pr],
      building: [(p)-[:BASED_IN]->(b) | b]
    } AS person
    ORDER BY person.lastname ASC, person.firstname ASC
    SKIP {skip}
    LIMIT {first}
    '''
    # Literal {braces} elsewhere in the query must survive, so plain
    # str.replace is used rather than str.format.
    for placeholder, value in (('{name}', name),
                               ('{skip}', skip),
                               ('{first}', first)):
        query = query.replace(placeholder, value)
    return query
def filter_person_query_3(name, skip, first):
    """Like filter_person_query_2 but returns the raw distinct Person
    nodes instead of a projected map.

    NOTE(review): same injection caveat as the other builders — the
    arguments are substituted verbatim into the query text.
    """
    query = '''
    WITH '{name}' AS query
    OPTIONAL MATCH (p:Person), (b:Building), (pr:Product)
    WHERE (p.title =~ '(?i).*{name}.*'
    OR p.firstname =~ '(?i){name}.*'
    OR p.lastname =~ '(?i){name}.*'
    OR query CONTAINS " " AND (toLower(query) = toLower(p.firstname) + " " + toLower(p.lastname))
    OR query CONTAINS ", " AND (toLower(query) = toLower(p.lastname) + ", " + toLower(p.firstname))
    OR ((p)--(b) AND (toLower(b.name) CONTAINS toLower(query) OR toLower(b.address) CONTAINS toLower(query)))
    OR ((p)--(pr) AND (toLower(pr.name) CONTAINS toLower(query))))
    WITH p AS person
    RETURN DISTINCT person
    ORDER BY person.lastname ASC, person.firstname ASC
    SKIP {skip}
    LIMIT {first}
    '''
    for placeholder, value in (('{name}', name),
                               ('{skip}', skip),
                               ('{first}', first)):
        query = query.replace(placeholder, value)
    return query
| 39.311688
| 119
| 0.552032
| 376
| 3,027
| 4.417553
| 0.154255
| 0.019868
| 0.037929
| 0.033715
| 0.937989
| 0.937989
| 0.929561
| 0.929561
| 0.929561
| 0.929561
| 0
| 0.001721
| 0.232243
| 3,027
| 77
| 120
| 39.311688
| 0.712995
| 0
| 0
| 0.88
| 0
| 0.106667
| 0.885403
| 0.087186
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0
| 0.04
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
dc8fd077b22acc164480c1bd4fe654b4f0869226
| 1,185
|
py
|
Python
|
.pylicense.py
|
TiKeil/Proj-Newton-NCD-corrected-TR-RB-for-pde-opt
|
a987c10d4b71a06ccd5506406d7ee67443896f88
|
[
"BSD-2-Clause"
] | null | null | null |
.pylicense.py
|
TiKeil/Proj-Newton-NCD-corrected-TR-RB-for-pde-opt
|
a987c10d4b71a06ccd5506406d7ee67443896f88
|
[
"BSD-2-Clause"
] | null | null | null |
.pylicense.py
|
TiKeil/Proj-Newton-NCD-corrected-TR-RB-for-pde-opt
|
a987c10d4b71a06ccd5506406d7ee67443896f88
|
[
"BSD-2-Clause"
] | null | null | null |
# ~~~
# This file is part of the paper:
#
# "An adaptive projected Newton non-conforming dual approach
# for trust-region reduced basis approximation of PDE-constrained
# parameter optimization"
#
# https://github.com/TiKeil/Proj-Newton-NCD-corrected-TR-RB-for-pde-opt
#
# Copyright 2019-2020 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# Felix Schindler (2020)
# Tim Keil (2020)
# ~~~
# Paper title block, embedded verbatim in the generated license header
# (the '#' characters inside the string are part of the header text).
name = '''This file is part of the paper:
#
# "An adaptive projected Newton non-conforming dual approach
# for trust-region reduced basis approximation of PDE-constrained
# parameter optimization"
#
'''
# Project URL line of the header.
url = 'https://github.com/TiKeil/Proj-Newton-NCD-corrected-TR-RB-for-pde-opt\n#'
# Copyright line of the header.
copyright_statement = 'Copyright 2019-2020 all developers. All rights reserved.'
# License line of the header.  NOTE(review): this name shadows the
# 'license' builtin; kept because the license tool reads it by this name.
license = '''Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)'''
# Comment prefix and the fence lines written before/after the header.
prefix = '#'
lead_in = '# ~~~'
lead_out = '# ~~~'
# Glob patterns selecting which files receive the header, and which are
# excluded from processing.
include_patterns = ('*.py', '*.md', '*.sh')
exclude_patterns = ('venv/*', '*.png', '*.pyc')
| 35.909091
| 94
| 0.653165
| 149
| 1,185
| 5.161074
| 0.496644
| 0.020806
| 0.052016
| 0.036411
| 0.83485
| 0.83485
| 0.83485
| 0.83485
| 0.83485
| 0.83485
| 0
| 0.029536
| 0.2
| 1,185
| 32
| 95
| 37.03125
| 0.781646
| 0.427004
| 0
| 0
| 0
| 0.133333
| 0.720965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dc93dd1ebe66f4421ea22d1c111311966e67023f
| 326
|
py
|
Python
|
code/python/external/gittwit/config/SampleAuth.py
|
rec/echomesh
|
be668971a687b141660fd2e5635d2fd598992a01
|
[
"MIT"
] | 30
|
2015-02-18T14:07:00.000Z
|
2021-12-11T15:19:01.000Z
|
code/python/external/gittwit/config/SampleAuth.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | 16
|
2015-01-01T23:17:24.000Z
|
2015-04-18T23:49:27.000Z
|
code/python/external/gittwit/config/SampleAuth.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | 31
|
2015-03-11T20:04:07.000Z
|
2020-11-02T13:56:59.000Z
|
# Template credential store: one OAuth credential set per Twitter account
# plus URL-shortener/index endpoints.  All values are intentionally blank
# in this sample file; copy it and fill in real secrets locally.
AUTH = {
    'twitter': {
        'tech': {
            'consumer_key': '',
            'consumer_secret': '',
            'access_token_key': '',
            'access_token_secret': '',
        },
        'President': {
            'consumer_key': '',
            'consumer_secret': '',
            'access_token_key': '',
            'access_token_secret': '',
        },
    },
    'yourls': '',
    'index_url': '',
}
| 15.52381
| 29
| 0.54908
| 31
| 326
| 5.354839
| 0.419355
| 0.26506
| 0.180723
| 0.277108
| 0.722892
| 0.722892
| 0.722892
| 0.722892
| 0.722892
| 0.722892
| 0
| 0
| 0.288344
| 326
| 20
| 30
| 16.3
| 0.715517
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
dc952695a59fff83943914c47ecbc3d8701f8ffb
| 15,520
|
py
|
Python
|
test/connectivity/acts/tests/google/ble/concurrency/ConcurrentBleScanningTest.py
|
Keneral/atools
|
055e76621340c7dced125e9de56e2645b5e1cdfb
|
[
"Unlicense"
] | null | null | null |
test/connectivity/acts/tests/google/ble/concurrency/ConcurrentBleScanningTest.py
|
Keneral/atools
|
055e76621340c7dced125e9de56e2645b5e1cdfb
|
[
"Unlicense"
] | null | null | null |
test/connectivity/acts/tests/google/ble/concurrency/ConcurrentBleScanningTest.py
|
Keneral/atools
|
055e76621340c7dced125e9de56e2645b5e1cdfb
|
[
"Unlicense"
] | 1
|
2018-02-24T19:13:01.000Z
|
2018-02-24T19:13:01.000Z
|
#!/usr/bin/env python3.4
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Test script to exercises Ble Scans can run in concurrency.
This test was designed to be run in a shield box.
"""
import concurrent
import time
from queue import Empty
from acts.test_utils.bt.BluetoothBaseTest import BluetoothBaseTest
from acts.test_utils.bt.BleEnum import AdvertiseSettingsAdvertiseMode
from acts.test_utils.bt.BleEnum import ScanSettingsCallbackType
from acts.test_utils.bt.BleEnum import ScanSettingsScanMode
from acts.test_utils.bt.bt_test_utils import adv_succ
from acts.test_utils.bt.bt_test_utils import generate_ble_advertise_objects
from acts.test_utils.bt.bt_test_utils import get_advanced_droid_list
from acts.test_utils.bt.bt_test_utils import reset_bluetooth
from acts.test_utils.bt.bt_test_utils import scan_failed
from acts.test_utils.bt.bt_test_utils import scan_result
from acts.test_utils.bt.bt_test_utils import take_btsnoop_logs
class ConcurrentBleScanningTest(BluetoothBaseTest):
    # Seconds to wait for any single asynchronous callback event.
    default_timeout = 20
    # Number of LE scan instances started concurrently; one more than
    # this is expected to fail (see test_max_concurrent_ble_scans_plus_one).
    max_concurrent_scans = 28
    def __init__(self, controllers):
        """Set up scanner/advertiser device roles for the test class."""
        BluetoothBaseTest.__init__(self, controllers)
        self.droid_list = get_advanced_droid_list(self.android_devices)
        # Device 0 scans, device 1 advertises.
        self.scn_ad = self.android_devices[0]
        self.adv_ad = self.android_devices[1]
        if self.droid_list[1]['max_advertisements'] == 0:
            # The advertiser hardware supports no advertisements at all,
            # so only the scanner-only test is runnable.
            self.tests = ("test_max_concurrent_ble_scans_plus_one", )
        return
    def on_fail(self, test_name, begin_time):
        """Collect btsnoop logs and reset bluetooth after a failed test."""
        self.log.debug("Test {} failed. Gathering bugreport and btsnoop logs."
                       .format(test_name))
        take_btsnoop_logs(self.android_devices, self, test_name)
        # Reset to a known-good bluetooth state for the next test.
        reset_bluetooth(self.android_devices)
    def setup_test(self):
        """Reset bluetooth on all devices before each test case."""
        return reset_bluetooth(self.android_devices)
    @BluetoothBaseTest.bt_test_wrap
    def test_max_concurrent_ble_scans(self):
        """Test max LE scans.
        Test that a single device can have max scans concurrently scanning.
        Steps:
        1. Initialize scanner
        2. Initialize advertiser
        3. Start advertising on the device from step 2
        4. Create max ble scan callbacks
        5. Start ble scan on each callback
        6. Verify that each callback triggers
        7. Stop all scans and advertisements
        Expected Result:
        All scanning instances should start without errors and the advertisement
        should be found on each scan instance.
        Returns:
        Pass if True
        Fail if False
        TAGS: LE, Scanning, Concurrency
        Priority: 0
        """
        test_result = True
        # Advertise the device name so the scanner's name filter matches.
        self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(True)
        self.scn_ad.droid.bleSetScanSettingsCallbackType(
            ScanSettingsCallbackType.CALLBACK_TYPE_ALL_MATCHES.value)
        self.scn_ad.droid.bleSetScanSettingsScanMode(
            ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)
        self.adv_ad.droid.bleSetAdvertiseSettingsAdvertiseMode(
            AdvertiseSettingsAdvertiseMode.ADVERTISE_MODE_LOW_LATENCY.value)
        advertise_callback, advertise_data, advertise_settings = (
            generate_ble_advertise_objects(self.adv_ad.droid))
        self.adv_ad.droid.bleSetAdvertiseSettingsIsConnectable(False)
        self.adv_ad.droid.bleStartBleAdvertising(
            advertise_callback, advertise_data, advertise_settings)
        # Confirm the advertisement actually started before scanning.
        try:
            self.adv_ad.ed.pop_event(
                adv_succ.format(advertise_callback), self.default_timeout)
        except Empty as error:
            self.log.exception("Test failed with Empty error: {}".format(
                error))
            test_result = False
        except concurrent.futures._base.TimeoutError as error:
            self.log.exception(
                "Test failed callback onSuccess never occurred: "
                "{}".format(error))
            test_result = False
        if not test_result:
            return test_result
        # Filter scans down to the advertiser's device name.
        filter_list = self.scn_ad.droid.bleGenFilterList()
        self.scn_ad.droid.bleSetScanFilterDeviceName(
            self.adv_ad.droid.bluetoothGetLocalName())
        self.scn_ad.droid.bleBuildScanFilter(filter_list)
        scan_settings = self.scn_ad.droid.bleBuildScanSetting()
        scan_callback_list = []
        # Start scans one at a time; each must observe the advertisement
        # before the next is started.
        for i in range(self.max_concurrent_scans):
            self.log.debug("Concurrent Ble Scan iteration {}".format(i + 1))
            scan_callback = self.scn_ad.droid.bleGenScanCallback()
            scan_callback_list.append(scan_callback)
            self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
                                              scan_callback)
            try:
                self.scn_ad.ed.pop_event(
                    scan_result.format(scan_callback), self.default_timeout)
                self.log.info("Found scan event successfully. Iteration {} "
                              "successful.".format(i))
            except Exception:
                self.log.info("Failed to find a scan result for callback {}"
                              .format(scan_callback))
                test_result = False
                break
        # Clean up: stop every scan that was started, then the advertiser.
        for callback in scan_callback_list:
            self.scn_ad.droid.bleStopBleScan(callback)
        self.adv_ad.droid.bleStopBleAdvertising(advertise_callback)
        if not test_result:
            return test_result
        self.log.info("Waiting for scan callbacks to stop completely.")
        # Wait for all scan callbacks to stop. There is no confirmation
        # otherwise.
        time.sleep(10)
        return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_concurrent_ble_scans_then_discover_advertisement(self):
"""Test max LE scans variant.
Test that a single device can have max scans concurrently scanning.
Steps:
1. Initialize scanner
2. Initialize advertiser
3. Create max ble scan callbacks
4. Start ble scan on each callback
5. Start advertising on the device from step 2
6. Verify that each callback triggers
7. Stop all scans and advertisements
Expected Result:
All scanning instances should start without errors and the advertisement
should be found on each scan instance.
Returns:
Pass if True
Fail if False
TAGS: LE, Scanning, Concurrency
Priority: 1
"""
self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(True)
self.scn_ad.droid.bleSetScanSettingsCallbackType(
ScanSettingsCallbackType.CALLBACK_TYPE_ALL_MATCHES.value)
self.scn_ad.droid.bleSetScanSettingsScanMode(
ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)
self.adv_ad.droid.bleSetAdvertiseSettingsAdvertiseMode(
AdvertiseSettingsAdvertiseMode.ADVERTISE_MODE_LOW_LATENCY.value)
advertise_callback, advertise_data, advertise_settings = (
generate_ble_advertise_objects(self.adv_ad.droid))
filter_list = self.scn_ad.droid.bleGenFilterList()
self.scn_ad.droid.bleSetScanFilterDeviceName(
self.adv_ad.droid.bluetoothGetLocalName())
self.scn_ad.droid.bleBuildScanFilter(filter_list)
scan_settings = self.scn_ad.droid.bleBuildScanSetting()
scan_callback_list = []
for i in range(self.max_concurrent_scans):
self.log.debug("Concurrent Ble Scan iteration {}".format(i + 1))
scan_callback = self.scn_ad.droid.bleGenScanCallback()
scan_callback_list.append(scan_callback)
self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
scan_callback)
self.adv_ad.droid.bleStartBleAdvertising(
advertise_callback, advertise_data, advertise_settings)
try:
self.adv_ad.ed.pop_event(
adv_succ.format(advertise_callback), self.default_timeout)
except Empty as error:
self.log.exception("Test failed with Empty error: {}".format(
error))
return False
except concurrent.futures._base.TimeoutError as error:
self.log.exception("Test failed, filtering callback onSuccess "
"never occurred: {}".format(error))
return False
i = 0
for callback in scan_callback_list:
try:
self.scn_ad.ed.pop_event(
scan_result.format(scan_callback), self.default_timeout)
self.log.info(
"Found scan event successfully. Iteration {} successful."
.format(i))
except Exception:
self.log.info("Failed to find a scan result for callback {}"
.format(scan_callback))
return False
i += 1
for callback in scan_callback_list:
self.scn_ad.droid.bleStopBleScan(callback)
self.adv_ad.droid.bleStopBleAdvertising(advertise_callback)
return True
    @BluetoothBaseTest.bt_test_wrap
    def test_max_concurrent_ble_scans_plus_one(self):
        """Test mac LE scans variant.
        Test that a single device can have max scans concurrently scanning.
        Steps:
        1. Initialize scanner
        3. Create max ble scan callbacks plus one
        5. Start ble scan on each callback
        6. Verify that the n+1th scan fails.
        7. Stop all scans
        Expected Result:
        The n+1th scan should fail to start.
        Returns:
        Pass if True
        Fail if False
        TAGS: LE, Scanning, Concurrency
        Priority: 1
        """
        test_result = True
        self.scn_ad.droid.bleSetScanSettingsCallbackType(
            ScanSettingsCallbackType.CALLBACK_TYPE_ALL_MATCHES.value)
        self.scn_ad.droid.bleSetScanSettingsScanMode(
            ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)
        filter_list = self.scn_ad.droid.bleGenFilterList()
        self.scn_ad.droid.bleBuildScanFilter(filter_list)
        scan_settings = self.scn_ad.droid.bleBuildScanSetting()
        scan_callback_list = []
        # Fill the controller with the maximum number of scan instances.
        for i in range(self.max_concurrent_scans):
            self.log.debug("Concurrent Ble Scan iteration {}".format(i + 1))
            scan_callback = self.scn_ad.droid.bleGenScanCallback()
            self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
                                              scan_callback)
            scan_callback_list.append(scan_callback)
        # The n+1th scan is expected to raise an onScanFailed event.
        scan_callback = self.scn_ad.droid.bleGenScanCallback()
        self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
                                          scan_callback)
        try:
            self.scn_ad.ed.pop_event(
                scan_failed.format(scan_callback), self.default_timeout)
            # NOTE(review): this log message says "scan event" but the
            # event popped above is the expected onScanFailed — the
            # wording (and the stale loop index i) is misleading.
            self.log.info(
                "Found scan event successfully. Iteration {} successful."
                .format(i))
        except Exception:
            self.log.info("Failed to find a onScanFailed event for callback {}"
                          .format(scan_callback))
            test_result = False
        # Clean up the max scans that did start (the extra one never ran).
        for callback in scan_callback_list:
            self.scn_ad.droid.bleStopBleScan(callback)
        return test_result
@BluetoothBaseTest.bt_test_wrap
def test_max_concurrent_ble_scans_verify_scans_stop_independently(self):
    """Test max LE scans variant.

    Test that a single device can have max scans concurrently scanning.

    Steps:
    1. Initialize scanner
    2. Initialize advertiser
    3. Create max ble scan callbacks
    4. Start ble scan on each callback
    5. Start advertising on the device from step 2
    6. Verify that each callback triggers
    7. Stop the scan and repeat steps 6 and 7 until all scans stopped

    Expected Result:
    All scanning instances should start without errors and the advertisement
    should be found on each scan instance. All scanning instances should
    stop successfully.

    Returns:
    Pass if True
    Fail if False

    TAGS: LE, Scanning, Concurrency
    Priority: 1
    """
    self.adv_ad.droid.bleSetAdvertiseDataIncludeDeviceName(True)
    self.scn_ad.droid.bleSetScanSettingsCallbackType(
        ScanSettingsCallbackType.CALLBACK_TYPE_ALL_MATCHES.value)
    self.scn_ad.droid.bleSetScanSettingsScanMode(
        ScanSettingsScanMode.SCAN_MODE_LOW_LATENCY.value)
    self.adv_ad.droid.bleSetAdvertiseSettingsAdvertiseMode(
        AdvertiseSettingsAdvertiseMode.ADVERTISE_MODE_LOW_LATENCY.value)
    advertise_callback, advertise_data, advertise_settings = (
        generate_ble_advertise_objects(self.adv_ad.droid))
    filter_list = self.scn_ad.droid.bleGenFilterList()
    # Filter on the advertiser's device name so only its advertisement matches.
    self.scn_ad.droid.bleSetScanFilterDeviceName(
        self.adv_ad.droid.bluetoothGetLocalName())
    self.scn_ad.droid.bleBuildScanFilter(filter_list)
    scan_settings = self.scn_ad.droid.bleBuildScanSetting()
    scan_callback_list = []
    for i in range(self.max_concurrent_scans):
        self.log.debug("Concurrent Ble Scan iteration {}".format(i + 1))
        scan_callback = self.scn_ad.droid.bleGenScanCallback()
        scan_callback_list.append(scan_callback)
        self.scn_ad.droid.bleStartBleScan(filter_list, scan_settings,
                                          scan_callback)
    self.adv_ad.droid.bleStartBleAdvertising(
        advertise_callback, advertise_data, advertise_settings)
    try:
        self.adv_ad.ed.pop_event(
            adv_succ.format(advertise_callback), self.default_timeout)
    except Empty as error:
        self.log.exception("Test failed with Empty error: {}".format(
            error))
        return False
    except concurrent.futures._base.TimeoutError as error:
        self.log.exception(
            "Test failed, filtering callback onSuccess never"
            " occurred: {}".format(error))
        return False
    # Bug fix: the original built the expected event name from `scan_callback`
    # (the last callback created above), so every iteration waited on the same
    # callback's events. Use the iterated `callback` instead; also replace the
    # manual `i` counter with enumerate.
    for i, callback in enumerate(scan_callback_list):
        expected_scan_event_name = scan_result.format(callback)
        try:
            self.scn_ad.ed.pop_event(expected_scan_event_name,
                                     self.default_timeout)
            self.log.info(
                "Found scan event successfully. Iteration {} successful.".format(
                    i))
        except Exception:
            self.log.info(
                "Failed to find a scan result for callback {}".format(
                    callback))
            return False
        # Stop this scan before checking the next one: scans must stop
        # independently of each other.
        self.scn_ad.droid.bleStopBleScan(callback)
    self.adv_ad.droid.bleStopBleAdvertising(advertise_callback)
    return True
| 43.231198
| 85
| 0.65393
| 1,765
| 15,520
| 5.546176
| 0.143343
| 0.040045
| 0.038615
| 0.052917
| 0.814996
| 0.797936
| 0.787619
| 0.767494
| 0.757789
| 0.735928
| 0
| 0.005822
| 0.280606
| 15,520
| 358
| 86
| 43.351955
| 0.870936
| 0.188015
| 0
| 0.75431
| 0
| 0
| 0.07929
| 0.003168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030172
| false
| 0
| 0.060345
| 0.00431
| 0.163793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dcbbf8a0cd788188c25f4ce97de303fee900e9be
| 46
|
py
|
Python
|
tests/test_settlers.py
|
dakrauth/django-settlers
|
3754296ee979a95fbd5885964cc0c1bfe301a3a0
|
[
"MIT"
] | null | null | null |
tests/test_settlers.py
|
dakrauth/django-settlers
|
3754296ee979a95fbd5885964cc0c1bfe301a3a0
|
[
"MIT"
] | null | null | null |
tests/test_settlers.py
|
dakrauth/django-settlers
|
3754296ee979a95fbd5885964cc0c1bfe301a3a0
|
[
"MIT"
] | null | null | null |
import pytest
def test_a():
    """Trivial smoke test: always passes."""
    assert 1 == 1
| 7.666667
| 15
| 0.673913
| 7
| 46
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 46
| 5
| 16
| 9.2
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f493818d9ed32ec5a552683bf2e7dd9a4047e589
| 64
|
py
|
Python
|
example/dir2/dir3/module_to_import.py
|
simitii/python_parent_import
|
6ba4438b6d8360af26af745e4e6297267702e9f3
|
[
"MIT"
] | 3
|
2020-04-25T11:00:04.000Z
|
2020-10-26T12:27:31.000Z
|
example/dir2/dir3/module_to_import.py
|
simitii/python_parent_import
|
6ba4438b6d8360af26af745e4e6297267702e9f3
|
[
"MIT"
] | null | null | null |
example/dir2/dir3/module_to_import.py
|
simitii/python_parent_import
|
6ba4438b6d8360af26af745e4e6297267702e9f3
|
[
"MIT"
] | null | null | null |
print("Module Imported")
def method1():
print("Hello World")
| 21.333333
| 24
| 0.6875
| 8
| 64
| 5.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0.140625
| 64
| 3
| 25
| 21.333333
| 0.781818
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
f4c803a2fa19895fd84c80cb9cbc3980712bb00f
| 54
|
py
|
Python
|
celerylog/__init__.py
|
bcambel/celery-log
|
6380e2c7132ecf5f701d358c7031514654f7d60d
|
[
"Unlicense"
] | null | null | null |
celerylog/__init__.py
|
bcambel/celery-log
|
6380e2c7132ecf5f701d358c7031514654f7d60d
|
[
"Unlicense"
] | null | null | null |
celerylog/__init__.py
|
bcambel/celery-log
|
6380e2c7132ecf5f701d358c7031514654f7d60d
|
[
"Unlicense"
] | null | null | null |
import uuid
def newid():
    """Generate a unique id as a 32-character lowercase hex string (UUID4)."""
    random_id = uuid.uuid4()
    return random_id.hex
| 10.8
| 27
| 0.666667
| 8
| 54
| 4.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.203704
| 54
| 4
| 28
| 13.5
| 0.813953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
52927e0913e93d6efb3ca04a60dd2a67d77e73f4
| 6,633
|
py
|
Python
|
tests/test_encoder.py
|
Caedin/TimeSeriesEncoder
|
980e0ca1c703af80f89564d86e80decf2e901a7d
|
[
"MIT"
] | null | null | null |
tests/test_encoder.py
|
Caedin/TimeSeriesEncoder
|
980e0ca1c703af80f89564d86e80decf2e901a7d
|
[
"MIT"
] | null | null | null |
tests/test_encoder.py
|
Caedin/TimeSeriesEncoder
|
980e0ca1c703af80f89564d86e80decf2e901a7d
|
[
"MIT"
] | null | null | null |
from src.timeseriesencoder import NumericEncoder
import pytest
import numpy as np
def runner(encoder, verbose=False):
    """Round-trip check: encode a value range, decode it, and compare elementwise.

    The range spans [min_value, max_value) with a step of one unit of the
    encoder's precision (1 for ints, 10**-float_precision for floats).
    """
    if encoder.numeric_type == 'float':
        step = 10 ** (-1 * encoder.float_precision)
    else:
        step = 1
    values = np.arange(encoder.min_value, encoder.max_value,
                       step).round(encoder.float_precision)
    encoded = encoder.encode(values)
    decoded = encoder.decode(encoded)
    if verbose:
        print(f'Input: {values}, Encoded: {encoded}, Decoded: {decoded}')
    for idx, original in enumerate(values):
        assert original == decoded[idx]
def test_signed_1bit_int():
    """Round-trip a signed int encoder at depth 1."""
    runner(NumericEncoder(signed=True, encoding_depth=1, numeric_type='int'))


def test_unsigned_1bit_int():
    """Round-trip an unsigned int encoder at depth 1."""
    runner(NumericEncoder(signed=False, encoding_depth=1, numeric_type='int'))


def test_signed_1bit_float_1():
    """Round-trip a signed float encoder at depth 1, precision 1."""
    runner(NumericEncoder(signed=True, encoding_depth=1, numeric_type='float', float_precision=1))


def test_unsigned_1bit_float_1():
    """Round-trip an unsigned float encoder at depth 1, precision 1."""
    runner(NumericEncoder(signed=False, encoding_depth=1, numeric_type='float', float_precision=1))


def test_signed_1bit_float_2():
    """Round-trip a signed float encoder at depth 1, precision 2."""
    runner(NumericEncoder(signed=True, encoding_depth=1, numeric_type='float', float_precision=2))


def test_unsigned_1bit_float_2():
    """Round-trip an unsigned float encoder at depth 1, precision 2."""
    runner(NumericEncoder(signed=False, encoding_depth=1, numeric_type='float', float_precision=2))


def test_signed_2bit_int():
    """Round-trip a signed int encoder at depth 2."""
    runner(NumericEncoder(signed=True, encoding_depth=2, numeric_type='int'))


def test_unsigned_2bit_int():
    """Round-trip an unsigned int encoder at depth 2."""
    runner(NumericEncoder(signed=False, encoding_depth=2, numeric_type='int'))


def test_signed_2bit_float_1():
    """Round-trip a signed float encoder at depth 2, precision 1."""
    runner(NumericEncoder(signed=True, encoding_depth=2, numeric_type='float', float_precision=1))


def test_unsigned_2bit_float_1():
    """Round-trip an unsigned float encoder at depth 2, precision 1."""
    runner(NumericEncoder(signed=False, encoding_depth=2, numeric_type='float', float_precision=1))


# Bug fix: the following two tests were originally both named *_2bit_float_1,
# re-defining (and thereby shadowing) the precision-1 tests above, so the
# precision-1 variants never ran under pytest. Renamed to *_2 to match their
# float_precision=2 configuration.
def test_signed_2bit_float_2():
    """Round-trip a signed float encoder at depth 2, precision 2."""
    runner(NumericEncoder(signed=True, encoding_depth=2, numeric_type='float', float_precision=2))


def test_unsigned_2bit_float_2():
    """Round-trip an unsigned float encoder at depth 2, precision 2."""
    runner(NumericEncoder(signed=False, encoding_depth=2, numeric_type='float', float_precision=2))
def test_all():
    """Exhaustively round-trip int and float encoders for depths 1-2 and precisions 1-2.

    Call order matches the original: per depth, unsigned int, signed int, then
    per precision, unsigned float followed by signed float.
    """
    for depth in (1, 2):
        runner(NumericEncoder(signed=False, encoding_depth=depth, numeric_type='int'))
        runner(NumericEncoder(signed=True, encoding_depth=depth, numeric_type='int'))
        for precision in (1, 2):
            runner(NumericEncoder(signed=False, encoding_depth=depth,
                                  numeric_type='float', float_precision=precision))
            runner(NumericEncoder(signed=True, encoding_depth=depth,
                                  numeric_type='float', float_precision=precision))
def test_base16():
    """Round-trip a set of encoder configurations using the base-16 character set."""
    charset = NumericEncoder.get_base_16()
    configurations = [
        dict(signed=True, encoding_depth=3, numeric_type='int'),
        dict(signed=True, encoding_depth=1, numeric_type='int'),
        dict(signed=False, encoding_depth=1, numeric_type='int'),
        dict(signed=True, encoding_depth=1, numeric_type='float', float_precision=1),
        dict(signed=False, encoding_depth=1, numeric_type='float', float_precision=1),
        dict(signed=True, encoding_depth=1, numeric_type='float', float_precision=2),
        dict(signed=False, encoding_depth=1, numeric_type='float', float_precision=2),
    ]
    for config in configurations:
        runner(NumericEncoder(character_set=charset, **config))
def test_base64():
    """Round-trip a set of encoder configurations using the base-64 character set."""
    charset = NumericEncoder.get_base_64()
    configurations = [
        dict(signed=True, encoding_depth=3, numeric_type='int'),
        dict(signed=True, encoding_depth=1, numeric_type='int'),
        dict(signed=False, encoding_depth=1, numeric_type='int'),
        dict(signed=True, encoding_depth=1, numeric_type='float', float_precision=1),
        dict(signed=False, encoding_depth=1, numeric_type='float', float_precision=1),
        dict(signed=True, encoding_depth=1, numeric_type='float', float_precision=2),
        dict(signed=False, encoding_depth=1, numeric_type='float', float_precision=2),
    ]
    for config in configurations:
        runner(NumericEncoder(character_set=charset, **config))
def test_base91():
    """Round-trip a set of encoder configurations using the base-91 character set."""
    charset = NumericEncoder.get_base_91()
    # NOTE: the first two configurations are identical in the original
    # (unlike base16/base64, which start at depth 3); preserved as-is.
    configurations = [
        dict(signed=True, encoding_depth=1, numeric_type='int'),
        dict(signed=True, encoding_depth=1, numeric_type='int'),
        dict(signed=False, encoding_depth=1, numeric_type='int'),
        dict(signed=True, encoding_depth=1, numeric_type='float', float_precision=1),
        dict(signed=False, encoding_depth=1, numeric_type='float', float_precision=1),
        dict(signed=True, encoding_depth=1, numeric_type='float', float_precision=2),
        dict(signed=False, encoding_depth=1, numeric_type='float', float_precision=2),
    ]
    for config in configurations:
        runner(NumericEncoder(character_set=charset, **config))
| 45.744828
| 141
| 0.702397
| 791
| 6,633
| 5.633375
| 0.082174
| 0.121185
| 0.224192
| 0.110637
| 0.9136
| 0.888914
| 0.88465
| 0.875898
| 0.868268
| 0.832361
| 0
| 0.018165
| 0.194934
| 6,633
| 145
| 142
| 45.744828
| 0.816292
| 0
| 0
| 0.568807
| 0
| 0
| 0.032203
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 1
| 0.155963
| false
| 0
| 0.027523
| 0
| 0.183486
| 0.009174
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bfdc0697d04cf4efde230b7a8c3888bad62b5964
| 37,357
|
py
|
Python
|
oas_dev/notebooks/global_comparisons/01_maps/maps_CCN-default-both-season.py
|
sarambl/OAS-DEV
|
8dec6d29ef23dee8135bc937cd6ee1ef5b64d304
|
[
"CC0-1.0"
] | null | null | null |
oas_dev/notebooks/global_comparisons/01_maps/maps_CCN-default-both-season.py
|
sarambl/OAS-DEV
|
8dec6d29ef23dee8135bc937cd6ee1ef5b64d304
|
[
"CC0-1.0"
] | null | null | null |
oas_dev/notebooks/global_comparisons/01_maps/maps_CCN-default-both-season.py
|
sarambl/OAS-DEV
|
8dec6d29ef23dee8135bc937cd6ee1ef5b64d304
|
[
"CC0-1.0"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
from oas_dev.util.plot.plot_maps import plot_map_diff, fix_axis4map_plot, plot_map_abs_abs_diff, plot_map, subplots_map, plot_map_diff_2case,plot_map_diff_only
from useful_scit.imps import (np, xr, plt, pd)
from oas_dev.util.imports import get_averaged_fields
from IPython.display import clear_output
# load and autoreload
from IPython import get_ipython
# noinspection PyBroadException
try:
_ipython = get_ipython()
_magic = _ipython.magic
_magic('load_ext autoreload')
_magic('autoreload 2')
except:
pass
# %% [markdown]
# ## Ideas:
# - Root mean square diffence??
# - Scatter plots of all values, e.g x-- sectional y-- non sectional color by lat/lev? Or lev lat difference.
# %% [markdown]
# # Map plots number concentration:
# %%
# Model and averaging window for all map plots in this notebook.
model = 'NorESM'
startyear = '2008-01'
endyear = '2010-12'
p_level=1013.  # hybrid sigma level used when avg_over_lev is False
pmin = 850. # minimum pressure level
avg_over_lev = True # True#True#False#True
pressure_adjust = True # Can only be false if avg_over_lev false. Plots particular hybrid sigma lev
# Level-averaging requires pressure-adjusted fields, so force it on.
if avg_over_lev:
    pressure_adjust = True
p_levels = [1013.,900., 800., 700., 600.] # used if not avg
# %%
from oas_dev.constants import get_plotpath
from oas_dev.util.practical_functions import make_folders
# Output location and filename prefix for the generated map figures.
version='v21dd_both'
plot_path = get_plotpath('maps')
filen_base = plot_path+'/_%s'%version
#print(plot_path)
make_folders(plot_path)  # ensure the plot directory exists before saving
# %% [markdown]
# ## Cases
# %%
#cases_sec = ['SECTv21_ctrl']
#cases_orig =['noSECTv21_default']
#cases_orig =['noSECTv21_ox_ricc']
# Reference ("to") case compared against each baseline ("from") case below.
to_case = 'SECTv21_ctrl_koagD'
from_cases = ['noSECTv21_default_dd','noSECTv21_ox_ricc_dd']
cases =[to_case]+from_cases
# %%
from oas_dev.constants import get_plotpath
from oas_dev.util.practical_functions import make_folders
#plot_path = get_plotpath('maps')
#filen_base = plot_path+'/_%s'%version
#print(plot_path)
#make_folders(plot_path)
# %%
def load_and_plot(var, cases,startyear, endyear, period=None,
                  avg_over_lev=avg_over_lev,
                  pmin=pmin,
                  pressure_adjust=pressure_adjust, p_level=None, relative=False):
    """Fetch averaged map fields for one variable and plot abs/abs/diff panels.

    NOTE(review): the avg_over_lev/pmin/pressure_adjust defaults are bound to
    the module-level values at definition time, not at call time.
    """
    maps_dic = get_averaged_fields.get_maps_cases(cases,[var],startyear, endyear,
                                                  avg_over_lev=avg_over_lev,
                                                  pmin=pmin, time_mask=period,
                                                  pressure_adjust=pressure_adjust, p_level=p_level)
    # Drop the (verbose) loading output from the notebook cell before plotting.
    clear_output()
    return plot_map_abs_abs_diff(var, cases, maps_dic, relative=relative, figsize=[18, 3], cbar_equal=True,
                                 kwargs_abs={},
                                 kwargs_diff={}, axs=None, cmap_abs='Reds', cmap_diff='RdBu_r')
# %%
def load_and_plot_rows(varl, cases,startyear, endyear, period=None,
                       avg_over_lev=avg_over_lev,
                       pmin=pmin,
                       pressure_adjust=pressure_adjust, p_level=None, relative=False):
    """Fetch averaged map fields for several variables and plot one
    abs/abs/diff row of panels per variable.

    Returns the axes array so callers can annotate the figure further.
    NOTE(review): defaults are bound to module-level values at definition time.
    """
    maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
                                                  avg_over_lev=avg_over_lev, time_mask=period,
                                                  pmin=pmin,
                                                  pressure_adjust=pressure_adjust, p_level=p_level)
    fig, axs = subplots_map(len(varl), 3, figsize=[18,3*len(varl)])
    # Idiom fix: enumerate replaces the original's manual `ii` counter.
    for row, var in enumerate(varl):
        plot_map_abs_abs_diff(var, cases, maps_dic, relative=relative, figsize=[18, 3], cbar_equal=True,
                              kwargs_abs={}, axs=axs[row, :],
                              kwargs_diff={}, cmap_abs='Reds', cmap_diff='RdBu_r')
    return axs
# %%
def load_and_plot_diff(varl, cases,startyear, endyear,
                       avg_over_lev=avg_over_lev,
                       pmin=pmin,nr_cols=2,
                       pressure_adjust=pressure_adjust,
                       p_level=None,
                       period=None,
                       relative=False,
                       width=5.):
    """Fetch averaged map fields and plot difference-only panels via plot_diff.

    NOTE(review): `period` is used for the data load (time_mask) but
    `period=None` is forwarded to plot_diff; currently harmless because
    plot_diff never reads its period parameter — confirm before relying on it.
    """
    maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
                                                  avg_over_lev=avg_over_lev, time_mask=period,
                                                  pmin=pmin,
                                                  pressure_adjust=pressure_adjust, p_level=p_level)
    plot_diff(maps_dic, varl, cases,nr_cols=nr_cols, relative=relative, width=width, period=None)
    return
def plot_diff(maps_dic, varl, cases,nr_cols=2, relative=False, width=5., axs=None, period=None):
    """Plot a two-case difference map for each variable in varl on a grid of axes.

    Creates a subplot grid of ceil(len(varl)/nr_cols) rows when axs is None.
    NOTE(review): the `period` parameter is accepted but never used here.
    """
    #fig, axs = subplots_map(int(np.ceil(len(varl)/2)), 2, figsize=[10,4*len(varl)])
    if axs is None:
        nr_rows = int(np.ceil(len(varl)/nr_cols))
        print(nr_rows)
        fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,2.5*nr_rows])#7*nr_cols,3*nr_rows])
    # One difference panel per variable: cases[0] vs cases[1].
    for var, ax in zip(varl, axs.flatten()):
        plot_map_diff_2case(var,cases[0],cases[1], maps_dic, relative=relative,
                            ax=ax, cmap_diff='RdBu_r')
# %%
def load_and_plot_diff_mm(varl,to_case,from_cases,startyear, endyear,
                          avg_over_lev=avg_over_lev,
                          pmin=pmin,nr_cols=2,
                          pressure_adjust=pressure_adjust,
                          p_level=None,
                          relative=False,
                          width=6., height=2.3):
    """Plot difference-only maps of to_case vs each from_case, one row per variable.

    NOTE(review): nr_rows is computed from the nr_cols *parameter* before
    nr_cols is overwritten with len(from_cases) on the next line — if a caller
    passes nr_cols != len(from_cases) the grid shape may not match; confirm.
    """
    cases = [to_case] + from_cases
    maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
                                                  avg_over_lev=avg_over_lev,
                                                  pmin=pmin,
                                                  pressure_adjust=pressure_adjust, p_level=p_level)
    nr_rows = int(np.ceil(len(varl)/nr_cols))
    nr_cols = len(from_cases)
    fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,height*nr_rows])
    # One row of difference panels per variable; a single variable gets the
    # whole (1-D) axes array.
    for i, var in enumerate(varl):
        if len(varl) == 1: saxs = axs
        else: saxs = axs[i,:]
        plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=relative, cbar_equal=True,
                           cbar_loc='side', tight_layout=False, inverse_diff=True, axs=saxs)
    #for from_case,i in zip(from_cases,range(nr_cols)):
    #    sax = axs[:,i]
    #    plot_diff(maps_dic, varl, [from_case,to_case],nr_cols=nr_cols, relative=relative, width=width, axs=sax)
    # Label the panels a, b, c, ...
    subp_insert_abc(axs, pos_y=0.1)
    return
# %% [markdown]
# ## Mean to 850hPa weighted by pressure difference:
# %% [markdown]
# ### CCN:
# %%
from useful_scit.plot.fig_manip import subp_insert_abc
# %%
# CCN/cloud-property fields for the JJA season: one row of difference maps per
# variable; relative differences for varl_rel, absolute for varl_abs.
varl_rel = ['ACTNL_incld', 'ACTREL_incld','TGCLDCWP']
varl_abs=['NCFT_Ghan']#,'TGCLDCWP']
varl = varl_rel+varl_abs
#varl=['ACTNL_incld', 'ACTREL_incld','TGCLDCWP']#,'TGCLDCWP']
period='JJA'
width=4.7
asp_rat = 0.48  # per-panel height/width aspect ratio
relative=True
cases = [to_case] + from_cases
maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
                                              avg_over_lev=avg_over_lev,
                                              pmin=pmin, time_mask=period,
                                              pressure_adjust=pressure_adjust, p_level=p_level)
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(varl)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,asp_rat*width*nr_rows])
for i, var in enumerate(varl):
    saxs = axs[i,:]
    # inverse_diff=True — presumably flips the sign/order of the difference;
    # confirm in plot_map_diff_only.
    plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=(var in varl_rel), cbar_equal=True,
                       kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
                       cbar_loc='side', tight_layout=False, inverse_diff=True)
#for from_case,i in zip(from_cases,range(nr_cols)):
#    sax = axs[:,i]
#    for var, ax in zip(varl, sax.flatten()):
#        plot_map_diff_2case(var, from_case,to_case, maps_dic, relative=(var in varl_rel),
#                            ax=ax, cmap_diff='RdBu_r')
subp_insert_abc(axs, pos_y=0.1)
#plot_diff(maps_dic, varl, cases[::-1],nr_cols=1, relative=relative)
#load_and_plot_diff_mm(varl,to_case,from_cases, startyear, endyear, avg_over_lev, pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl)+f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
# NOTE(review): this cell is an exact duplicate of the previous JJA cell
# (same variables, same period, same plotting calls) — most likely a
# copy-paste leftover; confirm intent before removing.
varl_rel = ['ACTNL_incld', 'ACTREL_incld','TGCLDCWP']
varl_abs=['NCFT_Ghan']#,'TGCLDCWP']
varl = varl_rel+varl_abs
#varl=['ACTNL_incld', 'ACTREL_incld','TGCLDCWP']#,'TGCLDCWP']
period='JJA'
width=4.7
asp_rat = 0.48
relative=True
cases = [to_case] + from_cases
maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
                                              avg_over_lev=avg_over_lev,
                                              pmin=pmin, time_mask=period,
                                              pressure_adjust=pressure_adjust, p_level=p_level)
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(varl)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,asp_rat*width*nr_rows])
for i, var in enumerate(varl):
    saxs = axs[i,:]
    plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=(var in varl_rel), cbar_equal=True,
                       kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
                       cbar_loc='side', tight_layout=False, inverse_diff=True)
#for from_case,i in zip(from_cases,range(nr_cols)):
#    sax = axs[:,i]
#    for var, ax in zip(varl, sax.flatten()):
#        plot_map_diff_2case(var, from_case,to_case, maps_dic, relative=(var in varl_rel),
#                            ax=ax, cmap_diff='RdBu_r')
subp_insert_abc(axs, pos_y=0.1)
#plot_diff(maps_dic, varl, cases[::-1],nr_cols=1, relative=relative)
#load_and_plot_diff_mm(varl,to_case,from_cases, startyear, endyear, avg_over_lev, pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl)+f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
varl_rel = ['ACTNL_incld', 'ACTREL_incld','TGCLDCWP']
varl_abs=['NCFT_Ghan']#,'TGCLDCWP']
varl = varl_rel+varl_abs
#varl=['ACTNL_incld', 'ACTREL_incld','TGCLDCWP']#,'TGCLDCWP']
period='DJF'
width=4.7
asp_rat = 0.48
relative=True
cases = [to_case] + from_cases
maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin, time_mask=period,
pressure_adjust=pressure_adjust, p_level=p_level)
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(varl)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,asp_rat*width*nr_rows])
for i, var in enumerate(varl):
saxs = axs[i,:]
plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=(var in varl_rel), cbar_equal=True,
kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
cbar_loc='side', tight_layout=False, inverse_diff=True)
#for from_case,i in zip(from_cases,range(nr_cols)):
# sax = axs[:,i]
# for var, ax in zip(varl, sax.flatten()):
# plot_map_diff_2case(var, from_case,to_case, maps_dic, relative=(var in varl_rel),
# ax=ax, cmap_diff='RdBu_r')
subp_insert_abc(axs, pos_y=0.1)
#plot_diff(maps_dic, varl, cases[::-1],nr_cols=1, relative=relative)
#load_and_plot_diff_mm(varl,to_case,from_cases, startyear, endyear, avg_over_lev, pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl)+f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
var='ACTNL_incld'#l_rel = ['NCONC01','N_AER','cb_SOA_NA','cb_SO4_NA']
periods=['JJA','SON','DJF','MAM',None]#'JJA'
width=4.7
asp_rat = 0.48
relative=True
cases = [to_case] + from_cases
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(periods)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,asp_rat*width*nr_rows])
for i,period in enumerate(periods):
maps_dic = get_averaged_fields.get_maps_cases(cases,[var],startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin, time_mask=period,
pressure_adjust=pressure_adjust, p_level=p_level)
saxs = axs[i,:]
plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=relative, cbar_equal=True,
kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
cbar_loc='side', tight_layout=False, inverse_diff=True)
#for from_case,i in zip(from_cases,range(nr_cols)):
# sax = axs[:,i]
# for var, ax in zip(varl, sax.flatten()):
# plot_map_diff_2case(var, from_case,to_case, maps_dic, relative=(var in varl_rel),
# ax=ax, cmap_diff='RdBu_r')
subp_insert_abc(axs, pos_y=0.1)
#plot_diff(maps_dic, varl, cases[::-1],nr_cols=1, relative=relative)
#load_and_plot_diff_mm(varl,to_case,from_cases, startyear, endyear, avg_over_lev, pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl)+f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
var='N_AER'#l_rel = ['NCONC01','N_AER','cb_SOA_NA','cb_SO4_NA']
periods=['JJA','DJF',None]#'JJA'
width=4.7
asp_rat = 0.48
relative=True
cases = [to_case] + from_cases
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(periods)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,asp_rat*width*nr_rows])
for i,period in enumerate(periods):
maps_dic = get_averaged_fields.get_maps_cases(cases,[var],startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin, time_mask=period,
pressure_adjust=pressure_adjust, p_level=p_level)
saxs = axs[i,:]
plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=relative, cbar_equal=True,
kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
cbar_loc='side', tight_layout=False, inverse_diff=True)
#axs[i,0].text(x=x_text_annotation, y=670000, s='Holiday in US', alpha=0.7, color='#334f8d'))
if period is None: pper = 'All year'
else: pper = period
print(i, period)
for ax in saxs:
ax.text(-.1,y=.5,
verticalalignment='center', s=pper, transform=ax.transAxes, rotation=90, weight='bold')
#axs[i,1].text(-.1,y=.1, s=pper, transform=ax.transAxes) #, weight='bold'
#for from_case,i in zip(from_cases,range(nr_cols)):
# sax = axs[:,i]
# for var, ax in zip(varl, sax.flatten()):
# plot_map_diff_2case(var, from_case,to_case, maps_dic, relative=(var in varl_rel),
# ax=ax, cmap_diff='RdBu_r')
subp_insert_abc(axs, pos_y=0.1)
#plot_diff(maps_dic, varl, cases[::-1],nr_cols=1, relative=relative)
#load_and_plot_diff_mm(varl,to_case,from_cases, startyear, endyear, avg_over_lev, pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl)+f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
var='ACTLN_incld'#l_rel = ['NCONC01','N_AER','cb_SOA_NA','cb_SO4_NA']
periods=['JJA','DJF',None]#'JJA'
width=4.7
asp_rat = 0.48
relative=True
cases = [to_case] + from_cases
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(periods)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,asp_rat*width*nr_rows])
for i,period in enumerate(periods):
maps_dic = get_averaged_fields.get_maps_cases(cases,[var],startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin, time_mask=period,
pressure_adjust=pressure_adjust, p_level=p_level)
saxs = axs[i,:]
plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=relative, cbar_equal=True,
kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
cbar_loc='side', tight_layout=False, inverse_diff=True)
#axs[i,0].text(x=x_text_annotation, y=670000, s='Holiday in US', alpha=0.7, color='#334f8d'))
if period is None: pper = 'All year'
else: pper = period
print(i, period)
for ax in saxs:
ax.text(-.1,y=.5,
verticalalignment='center', s=pper, transform=ax.transAxes, rotation=90, weight='bold')
#axs[i,1].text(-.1,y=.1, s=pper, transform=ax.transAxes) #, weight='bold'
#for from_case,i in zip(from_cases,range(nr_cols)):
# sax = axs[:,i]
# for var, ax in zip(varl, sax.flatten()):
# plot_map_diff_2case(var, from_case,to_case, maps_dic, relative=(var in varl_rel),
# ax=ax, cmap_diff='RdBu_r')
subp_insert_abc(axs, pos_y=0.1)
#plot_diff(maps_dic, varl, cases[::-1],nr_cols=1, relative=relative)
#load_and_plot_diff_mm(varl,to_case,from_cases, startyear, endyear, avg_over_lev, pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl)+f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
var='NCONC01'#l_rel = ['NCONC01','N_AER','cb_SOA_NA','cb_SO4_NA']
periods=['JJA','DJF',None]#'JJA'
width=4.7
asp_rat = 0.48
relative=True
cases = [to_case] + from_cases
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(periods)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width*nr_cols,asp_rat*width*nr_rows])
for i,period in enumerate(periods):
maps_dic = get_averaged_fields.get_maps_cases(cases,[var],startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin, time_mask=period,
pressure_adjust=pressure_adjust, p_level=p_level)
saxs = axs[i,:]
plot_map_diff_only(var, [to_case,*from_cases], maps_dic, relative=relative, cbar_equal=True,
kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
cbar_loc='side', tight_layout=False, inverse_diff=True)
#axs[i,0].text(x=x_text_annotation, y=670000, s='Holiday in US', alpha=0.7, color='#334f8d'))
if period is None: pper = 'All year'
else: pper = period
print(i, period)
for ax in saxs:
ax.text(-.1,y=.5,
verticalalignment='center', s=pper, transform=ax.transAxes, rotation=90, weight='bold')
#axs[i,1].text(-.1,y=.1, s=pper, transform=ax.transAxes) #, weight='bold'
#for from_case,i in zip(from_cases,range(nr_cols)):
# sax = axs[:,i]
# for var, ax in zip(varl, sax.flatten()):
# plot_map_diff_2case(var, from_case,to_case, maps_dic, relative=(var in varl_rel),
# ax=ax, cmap_diff='RdBu_r')
subp_insert_abc(axs, pos_y=0.1)
#plot_diff(maps_dic, varl, cases[::-1],nr_cols=1, relative=relative)
#load_and_plot_diff_mm(varl,to_case,from_cases, startyear, endyear, avg_over_lev, pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl)+f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
# Seasonal difference maps of column burden of NA mode (cb_NA):
# one row per period (JJA, DJF, all-year), one column per experiment case.
var = 'cb_NA'
periods = ['JJA', 'DJF', None]  # None -> no time mask, i.e. all-year average
width = 4.7
asp_rat = 0.48
relative = True
cases = [to_case] + from_cases
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(periods)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width * nr_cols, asp_rat * width * nr_rows])
# NOTE(review): the loop-body indentation was missing in the original paste
# and has been restored here -- confirm the intended loop extent.
for i, period in enumerate(periods):
    maps_dic = get_averaged_fields.get_maps_cases(cases, [var], startyear, endyear,
                                                  avg_over_lev=avg_over_lev,
                                                  pmin=pmin, time_mask=period,
                                                  pressure_adjust=pressure_adjust, p_level=p_level)
    saxs = axs[i, :]
    plot_map_diff_only(var, [to_case, *from_cases], maps_dic, relative=relative, cbar_equal=True,
                       kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
                       cbar_loc='side', tight_layout=False, inverse_diff=True)
    # Row label: the period name, or 'All year' for the unmasked row.
    if period is None:
        pper = 'All year'
    else:
        pper = period
    print(i, period)
    for ax in saxs:
        ax.text(-.1, y=.5,
                verticalalignment='center', s=pper, transform=ax.transAxes, rotation=90, weight='bold')
subp_insert_abc(axs, pos_y=0.1)
# NOTE(review): 'varl' here is left over from an earlier cell, not the single
# 'var' plotted above -- confirm the intended filename.
fn = filen_base + '_'.join(varl) + f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
# Same seasonal map layout as the previous cell, but for cb_SOA_NA
# (column burden of SOA in the NA mode).
var = 'cb_SOA_NA'
periods = ['JJA', 'DJF', None]  # None -> no time mask, i.e. all-year average
width = 4.7
asp_rat = 0.48
relative = True
cases = [to_case] + from_cases
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(periods)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width * nr_cols, asp_rat * width * nr_rows])
# NOTE(review): loop-body indentation restored from a whitespace-stripped
# original -- confirm the intended loop extent.
for i, period in enumerate(periods):
    maps_dic = get_averaged_fields.get_maps_cases(cases, [var], startyear, endyear,
                                                  avg_over_lev=avg_over_lev,
                                                  pmin=pmin, time_mask=period,
                                                  pressure_adjust=pressure_adjust, p_level=p_level)
    saxs = axs[i, :]
    plot_map_diff_only(var, [to_case, *from_cases], maps_dic, relative=relative, cbar_equal=True,
                       kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
                       cbar_loc='side', tight_layout=False, inverse_diff=True)
    # Row label: the period name, or 'All year' for the unmasked row.
    if period is None:
        pper = 'All year'
    else:
        pper = period
    print(i, period)
    for ax in saxs:
        ax.text(-.1, y=.5,
                verticalalignment='center', s=pper, transform=ax.transAxes, rotation=90, weight='bold')
subp_insert_abc(axs, pos_y=0.1)
# NOTE(review): 'varl' is from an earlier cell, not the 'var' plotted above.
fn = filen_base + '_'.join(varl) + f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
# JJA difference maps: one row per variable, one column per experiment case.
varl_rel = ['NCONC01', 'N_AER', 'cb_SOA_NA', 'cb_SO4_NA']  # shown as relative differences
varl_abs = []                                              # shown as absolute differences
varl = varl_rel + varl_abs
period = 'JJA'
width = 4.7
asp_rat = 0.48
relative = True
cases = [to_case] + from_cases
maps_dic = get_averaged_fields.get_maps_cases(cases, varl, startyear, endyear,
                                              avg_over_lev=avg_over_lev,
                                              pmin=pmin, time_mask=period,
                                              pressure_adjust=pressure_adjust, p_level=p_level)
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(varl)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width * nr_cols, asp_rat * width * nr_rows])
# NOTE(review): loop-body indentation restored from a whitespace-stripped original.
for i, var in enumerate(varl):
    saxs = axs[i, :]
    plot_map_diff_only(var, [to_case, *from_cases], maps_dic, relative=(var in varl_rel), cbar_equal=True,
                       kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
                       cbar_loc='side', tight_layout=False, inverse_diff=True)
subp_insert_abc(axs, pos_y=0.1)
fn = filen_base + '_'.join(varl) + f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
# DJF version of the previous cell: same variables, winter season mask.
varl_rel = ['NCONC01', 'N_AER', 'cb_SOA_NA', 'cb_SO4_NA']  # shown as relative differences
varl_abs = []                                              # shown as absolute differences
varl = varl_rel + varl_abs
period = 'DJF'
width = 4.7
asp_rat = 0.48
relative = True
cases = [to_case] + from_cases
maps_dic = get_averaged_fields.get_maps_cases(cases, varl, startyear, endyear,
                                              avg_over_lev=avg_over_lev,
                                              pmin=pmin, time_mask=period,
                                              pressure_adjust=pressure_adjust, p_level=p_level)
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(varl)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width * nr_cols, asp_rat * width * nr_rows])
# NOTE(review): loop-body indentation restored from a whitespace-stripped original.
for i, var in enumerate(varl):
    saxs = axs[i, :]
    plot_map_diff_only(var, [to_case, *from_cases], maps_dic, relative=(var in varl_rel), cbar_equal=True,
                       kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
                       cbar_loc='side', tight_layout=False, inverse_diff=True)
subp_insert_abc(axs, pos_y=0.1)
fn = filen_base + '_'.join(varl) + f'{relative}_{period}.'
print(fn)
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
# %%
# Annual-mean cloud-property difference maps (no time mask): in-cloud droplet
# number/size, cloud water path (relative) and cloud forcing (absolute).
varl_rel = ['AWNC_incld', 'AREL_incld', 'TGCLDCWP']  # shown as relative differences
varl_abs = ['NCFT_Ghan']                             # shown as absolute differences
varl = varl_rel + varl_abs
width = 4.4
asp_rat = 0.48
relative = True
cases = [to_case] + from_cases
maps_dic = get_averaged_fields.get_maps_cases(cases, varl, startyear, endyear,
                                              avg_over_lev=avg_over_lev,
                                              pmin=pmin,
                                              pressure_adjust=pressure_adjust, p_level=p_level)
nr_cols = len(from_cases)
nr_rows = int(np.ceil(len(varl)))
fig, axs = subplots_map(nr_rows, nr_cols, figsize=[width * nr_cols, asp_rat * width * nr_rows])
# NOTE(review): loop-body indentation restored from a whitespace-stripped original.
for i, var in enumerate(varl):
    saxs = axs[i, :]
    plot_map_diff_only(var, [to_case, *from_cases], maps_dic, relative=(var in varl_rel), cbar_equal=True,
                       kwargs_diff={}, axs=saxs, cmap_diff='RdBu_r',
                       cbar_loc='side', tight_layout=False, inverse_diff=True)
subp_insert_abc(axs, pos_y=0.1)
fn = filen_base + '_'.join(varl) + f'{relative}.'
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
# %%
# Relative difference maps: total aerosol number and mode-1 number.
varl = ['N_AER', 'NCONC01']
relative = True
load_and_plot_diff_mm(
    varl, to_case, from_cases, startyear, endyear, avg_over_lev,
    pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,
    nr_cols=1, width=4)
fn = filen_base + '_'.join(varl) + f'{relative}.'
plt.tight_layout()
plt.savefig(fn + 'png')
plt.savefig(fn + 'pdf')
print(fn)
# %%
# Absolute difference map: direct radiative effect (Ghan decomposition).
varl = ['DIR_Ghan']
relative = False
load_and_plot_diff_mm(
    varl, to_case, from_cases, startyear, endyear, avg_over_lev,
    pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,
    nr_cols=1, width=4.1, height=2.1)
fn = filen_base + '_'.join(varl) + f'{relative}.'
plt.tight_layout()
plt.savefig(fn + 'png')
plt.savefig(fn + 'pdf', dpi=300)
print(fn)
# %%
# Cloud optical depth at 550 nm (saving disabled).
varl = ['CDOD550']
relative = False
load_and_plot_diff_mm(
    varl, to_case, from_cases, startyear, endyear, avg_over_lev,
    pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,
    nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl) + f'{relative}.'
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
print(fn)
# %%
# Mode-1 number concentration and median radius (saving disabled).
varl = ['NCONC01', 'NMR01']
relative = False
load_and_plot_diff_mm(
    varl, to_case, from_cases, startyear, endyear, avg_over_lev,
    pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,
    nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl) + f'{relative}.'
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
print(fn)
# %%
# Column burdens of the NA-mode species (saving disabled).
varl = ['cb_SOA_NA', 'cb_SO4_NA', 'cb_NA']
relative = True
load_and_plot_diff_mm(
    varl, to_case, from_cases, startyear, endyear, avg_over_lev,
    pmin=pmin, relative=relative, pressure_adjust=pressure_adjust,
    nr_cols=1, width=5.5)
fn = filen_base + '_'.join(varl) + f'{relative}.'
plt.tight_layout()
#plt.savefig(fn + 'png')
#plt.savefig(fn + 'pdf')
print(fn)
# %%
# Global-mean direct effect of each experiment relative to the first case.
maps_dic = get_averaged_fields.get_maps_cases(
    cases, ['DIR_Ghan'], startyear, endyear,
    avg_over_lev=avg_over_lev,
    pmin=pmin,
    pressure_adjust=pressure_adjust, p_level=p_level)
# %%
cases
# %%
maps_dic[cases[0]]['DIR_Ghan']
# %%
dff_ = maps_dic[cases[1]][['DIR_Ghan']] - maps_dic[cases[0]][['DIR_Ghan']]
print(cases[1])
# %%
dff2_ = maps_dic[cases[2]][['DIR_Ghan']] - maps_dic[cases[0]][['DIR_Ghan']]
print(cases[2])
# %%
from oas_dev.util.slice_average.avg_pkg import average_model_var
# %%
average_model_var(dff_, 'DIR_Ghan', area='Global')
# %%
average_model_var(dff2_, 'DIR_Ghan', area='Global')
# %%
# The ten cells that followed were byte-for-byte copies of one another apart
# from the variable list and the relative/absolute switch; they are collapsed
# into a single loop that performs the identical sequence of calls (same
# order, same figure sizes, both png and pdf saved for each plot).
for varl, relative in [
    (['LWDIR_Ghan', 'LWDIR_Ghan'], False),
    (['SOA_NA_totLossR', 'SOA_NA_lifetime'], False),
    (['SOA_NA_lifetime', 'SOA_NA_lifetime'], False),
    (['HYGRO01', 'HYGRO01'], False),
    (['HYGRO01', 'HYGRO01'], True),
    (['SOA_NAcondTend', 'SO4_NAcondTend'], True),
    (['SWCF_Ghan', 'LWCF_Ghan', 'NCFT_Ghan'], False),
    (['cb_SOA_NA', 'cb_SO4_NA'], True),
    (['N_AER', 'NCONC01'], True),
    (['FORMRATE', 'GR', 'COAGNUCL'], True),
]:
    load_and_plot_diff_mm(varl, to_case, from_cases, startyear, endyear, avg_over_lev,
                          pmin=pmin, relative=relative,
                          pressure_adjust=pressure_adjust, nr_cols=1, width=5.5)
    fn = filen_base + '_'.join(varl) + f'{relative}.'
    plt.tight_layout()
    plt.savefig(fn + 'png')
    plt.savefig(fn + 'pdf')
# %%
# %%
| 37.658266
| 166
| 0.643066
| 5,478
| 37,357
| 4.091822
| 0.050383
| 0.031586
| 0.032121
| 0.037475
| 0.911131
| 0.906313
| 0.903324
| 0.899309
| 0.88936
| 0.884542
| 0
| 0.017624
| 0.207137
| 37,357
| 991
| 167
| 37.696266
| 0.739154
| 0.311106
| 0
| 0.823204
| 0
| 0
| 0.060376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009208
| false
| 0.001842
| 0.020258
| 0
| 0.036832
| 0.044199
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bfeffb76cfd474276aecdbe95558f9ca99969dbb
| 13,731
|
py
|
Python
|
genemail/test_testing.py
|
cadithealth/genemail
|
d906ad9deec70a6b19b66c244044d4466df2371a
|
[
"MIT"
] | 5
|
2015-08-13T05:22:54.000Z
|
2018-08-28T14:14:55.000Z
|
genemail/test_testing.py
|
cadithealth/genemail
|
d906ad9deec70a6b19b66c244044d4466df2371a
|
[
"MIT"
] | null | null | null |
genemail/test_testing.py
|
cadithealth/genemail
|
d906ad9deec70a6b19b66c244044d4466df2371a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <grabner@cadit.com>
# date: 2013/10/22
# copy: (C) Copyright 2013 Cadit Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import sys, unittest
try:
import pxml
except ImportError:
pxml = None
from .testing import EmailTestMixin
#------------------------------------------------------------------------------
# Tests for EmailTestMixin.assertEmailEqual: two serialized emails should
# compare equal when they differ only in header order/case, MIME boundary
# names, or quoted-printable soft line breaks, and should produce readable
# diffs (headers / structure tree / body text / XML) when they truly differ.
# NOTE(review): indentation -- and the blank line separating MIME headers from
# bodies inside the eml fixture strings -- appears to have been stripped by
# whatever extracted this file; the code is left byte-identical, comments only.
class TestEmailMixin(EmailTestMixin, unittest.TestCase):
# Show full diffs from assertMultiLineEqual.
maxDiff = None
#----------------------------------------------------------------------------
# Reset per-test state; noxml=True forces plain-text (non-XML) comparison.
def setUp(self):
super(TestEmailMixin, self).setUp()
self.noxml = False
#----------------------------------------------------------------------------
# Equal: header order/case and boundary strings may differ between messages.
def test_mixin_headers_same(self):
eml1 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
Date: Fri, 13 Feb 2009 23:31:30 -0000
To: test@example.com
Message-ID: <1234567890@@genemail.example.com>
From: noreply@example.com
Subject: Foo The Bar
--==genemail.test-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
CONTENT
--==genemail.test-alt-2==
'''
eml2 = '''\
date: Fri, 13 Feb 2009 23:31:30 -0000
subject: Foo The Bar
from: noreply@example.com
mime-version: 1.0
to: test@example.com
content-type: multipart/alternative; boundary="==genemail.test-BOUNDARY-alt-2=="
message-id: <1234567890@@genemail.example.com>
--==genemail.test-BOUNDARY-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
CONTENT
--==genemail.test-BOUNDARY-alt-2==
'''
self.assertEmailEqual(eml1, eml2)
#----------------------------------------------------------------------------
# Differing headers are reported as a sorted unified-diff of header lines.
def test_mixin_headers_diff(self):
eml1 = '''\
Content-Type: text/plain
MIME-Version: 1.0
Date: Fri, 13 Feb 2009 23:31:30 -0000
To: test@example.com
Message-ID: <1234567890@@genemail.example.com>
From: noreply@example.com
Subject: Foo The Bar
CONTENT
'''
eml2 = '''\
Date: Fri, 13 Feb 2009 23:31:30 -0000
Subject: Foo The Bar
X-Generator: an extra header (note that mime-version is missing)
From: noreply@example.com
To: test@example.com
Message-ID: <1234567890@@genemail.example.com>
Content-Type: text/plain; charset=us-ascii
CONTENT
'''
with self.assertRaises(AssertionError) as cm:
self.assertEmailEqual(eml1, eml2)
# NOTE(review): BaseException.message is Python-2-only; confirm this module
# still targets Python 2 (same idiom is used throughout the class).
msg = '\n'.join(cm.exception.message.split('\n')[1:])
self.assertMultiLineEqual(msg, '''\
EMAIL HEADERS:
- Content-Type: text/plain
+ Content-Type: text/plain; charset=us-ascii
Date: Fri, 13 Feb 2009 23:31:30 -0000
From: noreply@example.com
Message-ID: <1234567890@@genemail.example.com>
- MIME-Version: 1.0
Subject: Foo The Bar
To: test@example.com
+ X-Generator: an extra header (note that mime-version is missing)
''')
#----------------------------------------------------------------------------
# Equal: identical MIME trees even though the boundary strings differ.
def test_mixin_structure_same(self):
eml1 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: multipart/related; boundary="==genemail.test-rel-3=="
MIME-Version: 1.0
--==genemail.test-rel-3==
Content-Type: text/plain
MIME-Version: 1.0
--==genemail.test-rel-3==
Content-Type: image/png
MIME-Version: 1.0
--==genemail.test-rel-3==--
--==genemail.test-alt-2==--
'''
eml2 = '''\
Content-Type: multipart/alternative; boundary="==BOUNDARY-f8967b6d-alt-2=="
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-alt-2==
Content-Type: multipart/related; boundary="==BOUNDARY-f8967b6d-rel-3=="
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-rel-3==
Content-Type: text/plain
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-rel-3==
Content-Type: image/png
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-rel-3==--
--==BOUNDARY-f8967b6d-alt-2==--
'''
self.assertEmailEqual(eml1, eml2)
#----------------------------------------------------------------------------
# A structural difference (extra text/svg part) is shown as an ASCII-tree diff.
def test_mixin_structure_diff(self):
eml1 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: multipart/related; boundary="==genemail.test-rel-3=="
MIME-Version: 1.0
--==genemail.test-rel-3==
Content-Type: text/plain
MIME-Version: 1.0
--==genemail.test-rel-3==
Content-Type: image/png
MIME-Version: 1.0
--==genemail.test-rel-3==--
--==genemail.test-alt-2==--
'''
eml2 = '''\
Content-Type: multipart/alternative; boundary="==BOUNDARY-f8967b6d-alt-2=="
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-alt-2==
Content-Type: multipart/related; boundary="==BOUNDARY-f8967b6d-rel-3=="
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-rel-3==
Content-Type: text/plain
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-rel-3==
Content-Type: image/png
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-rel-3==
Content-Type: text/svg
MIME-Version: 1.0
--==BOUNDARY-f8967b6d-rel-3==--
--==BOUNDARY-f8967b6d-alt-2==--
'''
with self.assertRaises(AssertionError) as cm:
self.assertEmailEqual(eml1, eml2)
msg = '\n'.join(cm.exception.message.split('\n')[1:])
self.assertMultiLineEqual(msg, '''\
EMAIL STRUCTURE:
multipart/alternative
|-- text/plain
`-- multipart/related
|-- text/plain
- `-- image/png
? ^
+ |-- image/png
? ^
+ `-- text/svg
''')
#----------------------------------------------------------------------------
# Equal: quoted-printable soft line breaks are decoded before comparing.
def test_mixin_content_textplain_same(self):
eml1 = '''\
Content-Type: text/plain
MIME-Version: 1.0
this is some content.
'''
eml2 = '''\
Content-Type: text/plain
Content-Transfer-Encoding: quoted-printable
MIME-Version: 1.0
this is some=
content.
'''
self.assertEmailEqual(eml1, eml2)
#----------------------------------------------------------------------------
# Without the quoted-printable header the '=' is literal content, so the
# bodies differ and a text diff is produced.
def test_mixin_content_textplain_diff(self):
eml1 = '''\
Content-Type: text/plain
MIME-Version: 1.0
this is some content.
'''
eml2 = '''\
Content-Type: text/plain
MIME-Version: 1.0
this is some=
content.
'''
with self.assertRaises(AssertionError) as cm:
self.assertEmailEqual(eml1, eml2)
msg = '\n'.join(cm.exception.message.split('\n')[1:])
self.assertMultiLineEqual(msg, '''\
- this is some content.
+ this is some=
+ content.
''')
#----------------------------------------------------------------------------
# Equal: quoted-printable decoding also applies to parts inside a multipart.
def test_mixin_content_multiparttextplain_same(self):
eml1 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
this is some content.
--==genemail.test-alt-2==--
'''
eml2 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: text/plain
Content-Transfer-Encoding: quoted-printable
MIME-Version: 1.0
this is some con=
tent.
--==genemail.test-alt-2==--
'''
self.assertEmailEqual(eml1, eml2)
#----------------------------------------------------------------------------
# Differing multipart body text is reported as a text diff of that part.
def test_mixin_content_multiparttextplain_diff(self):
eml1 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
this is some content.
--==genemail.test-alt-2==--
'''
eml2 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
--==genemail.test-alt-2==
Content-Type: text/plain
MIME-Version: 1.0
this is some con=
tent.
--==genemail.test-alt-2==--
'''
with self.assertRaises(AssertionError) as cm:
self.assertEmailEqual(eml1, eml2)
msg = '\n'.join(cm.exception.message.split('\n')[1:])
self.assertMultiLineEqual(msg, '''\
- this is some content.
+ this is some con=
tent.
? ++
''')
#----------------------------------------------------------------------------
# Compare XML semantically via the optional pxml package; fall back to a plain
# multi-line text comparison when pxml is absent or self.noxml is set.
def assertXmlEqual(self, x1, x2, msg=None):
if self.noxml or pxml is None:
return self.assertMultiLineEqual(x1, x2, msg=msg)
# Throwaway TestCase subclass just to borrow pxml's assertXmlEqual.
class PxmlXmlTest(pxml.XmlTestMixin, unittest.TestCase):
def runTest(self): pass
PxmlXmlTest().assertXmlEqual(x1, x2, msg=msg)
#----------------------------------------------------------------------------
# Semantically-equal HTML: fails under text comparison (noxml=True) but
# passes under XML comparison when pxml is available.
def test_mixin_content_texthtml_same(self):
eml1 = '''\
Content-Type: text/html
MIME-Version: 1.0
<html><body id="Foo" class="bar">hello</body></html>
'''
eml2 = '''\
Content-Type: text/html
Content-Transfer-Encoding: quoted-printable
MIME-Version: 1.0
<html ><body class='bar' id='Foo' >hello=
</body ></html>
'''
# note: these are not actually semantically different, but
# this is a test of behaviour if 'assertXmlEqual'
# is NOT available.
self.noxml = True
with self.assertRaises(AssertionError) as cm:
self.assertEmailEqual(eml1, eml2)
msg = '\n'.join(cm.exception.message.split('\n')[1:])
self.assertMultiLineEqual(msg, '''\
- <html><body id="Foo" class="bar">hello</body></html>
? --------- ^ ^
+ <html ><body class='bar' id='Foo' >hello</body ></html>
? ++ ^ ^^^^^^^^^^^ ++
''')
if pxml is None:
sys.stderr.write('*** PXML LIBRARY NOT PRESENT - SKIPPING XML DIFF *** ')
return
self.noxml = False
self.assertEmailEqual(eml1, eml2)
#----------------------------------------------------------------------------
# Semantically-different HTML: fails under both comparisons, but the XML path
# reports an attribute-level diff instead of a raw text diff.
def test_mixin_content_texthtml_diff(self):
eml1 = '''\
Content-Type: text/html
MIME-Version: 1.0
<html><body id="Foo" class="bar">hello</body></html>
'''
eml2 = '''\
Content-Type: text/html
Content-Transfer-Encoding: quoted-printable
MIME-Version: 1.0
<html ><body class = 'bar' >hel=
lo</body></html>
'''
# note: these are both syntactically AND semantically
# different... they should be different with and without
# xml processing - but the errors should be different.
self.noxml = True
with self.assertRaises(AssertionError) as cm:
self.assertEmailEqual(eml1, eml2)
msg = '\n'.join(cm.exception.message.split('\n')[1:])
self.assertMultiLineEqual(msg, '''\
- <html><body id="Foo" class="bar">hello</body></html>
? --------- ^ ^
+ <html ><body class = 'bar' >hello</body></html>
? + + ^^ ^^
''')
if pxml is None:
sys.stderr.write('*** PXML LIBRARY NOT PRESENT - SKIPPING XML DIFF *** ')
return
self.noxml = False
with self.assertRaises(AssertionError) as cm:
self.assertEmailEqual(eml1, eml2)
msg = '\n'.join(cm.exception.message.split('\n')[1:])
self.assertMultiLineEqual(msg, '''\
<?xml version="1.0" encoding="UTF-8"?>
<html>
- <body class="bar" id="Foo">hello</body>
? ---------
+ <body class="bar">hello</body>
</html>
''')
#----------------------------------------------------------------------------
# End-to-end: a realistic multipart/alternative + multipart/related message
# (text, HTML with attribute reordering, and an attachment) compares equal
# despite renamed boundaries. Requires pxml for the HTML part.
def test_mixin_allinone(self):
if pxml is None:
sys.stderr.write('*** PXML LIBRARY NOT PRESENT - SKIPPING XML DIFF *** ')
return
eml1 = '''\
Content-Type: multipart/alternative; boundary="==genemail.test-alt-2=="
MIME-Version: 1.0
Date: Fri, 13 Feb 2009 23:31:30 -0000
To: test@example.com
Message-ID: <1234567890@@genemail.example.com>
From: noreply@example.com
Subject: Foo The Bar
--==genemail.test-alt-2==
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Foo the bar [].
--==genemail.test-alt-2==
Content-Type: multipart/related; boundary="==genemail.test-rel-3=="
MIME-Version: 1.0
--==genemail.test-rel-3==
MIME-Version: 1.0
Content-Type: text/html; charset="us-ascii"
Content-Transfer-Encoding: 7bit
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Foo The Bar</title>
</head>
<body id="bar" class="foo">
<p>Foo the bar <img src="cid:slogan.txt" />.</p>
</body>
</html>
--==genemail.test-rel-3==
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment
Content-ID: <slogan.txt>
ALL YOUR BASE ARE BELONG TO US
--==genemail.test-rel-3==--
--==genemail.test-alt-2==--
'''
eml2 = '''\
Content-Type: multipart/alternative; boundary="==ARANDOMBOUNDARY-HEHE-alt-2=="
MIME-Version: 1.0
Date: Fri, 13 Feb 2009 23:31:30 -0000
To: test@example.com
Message-ID: <1234567890@@genemail.example.com>
From: noreply@example.com
Subject: Foo The Bar
--==ARANDOMBOUNDARY-HEHE-alt-2==
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Foo the bar [].
--==ARANDOMBOUNDARY-HEHE-alt-2==
Content-Type: multipart/related; boundary="==ARANDOMBOUNDARY-HEHE-rel-3=="
MIME-Version: 1.0
--==ARANDOMBOUNDARY-HEHE-rel-3==
MIME-Version: 1.0
Content-Type: text/html; charset="us-ascii"
Content-Transfer-Encoding: 7bit
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Foo The Bar</title>
</head>
<body class="foo" id="bar">
<p>Foo the bar <img src="cid:slogan.txt" />.</p>
</body>
</html>
--==ARANDOMBOUNDARY-HEHE-rel-3==
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment
Content-ID: <slogan.txt>
ALL YOUR BASE ARE BELONG TO US
--==ARANDOMBOUNDARY-HEHE-rel-3==--
--==ARANDOMBOUNDARY-HEHE-alt-2==--
'''
self.assertEmailEqual(eml1, eml2)
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
| 26.610465
| 80
| 0.600757
| 1,727
| 13,731
| 4.754488
| 0.111175
| 0.073682
| 0.059189
| 0.083912
| 0.864937
| 0.837413
| 0.819632
| 0.802217
| 0.762757
| 0.741688
| 0
| 0.044726
| 0.13211
| 13,731
| 515
| 81
| 26.662136
| 0.64429
| 0.133639
| 0
| 0.82598
| 0
| 0
| 0.718921
| 0.250485
| 0
| 0
| 0
| 0
| 0.073529
| 1
| 0.034314
| false
| 0.002451
| 0.009804
| 0
| 0.061275
| 0.009804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
871b3961c1925c446e60fc4584c452ddae05ef97
| 174
|
py
|
Python
|
fantasy_sport/__init__.py
|
josuebrunel/yfs
|
42b2862ac76dbe66ed3d92469bab839419cf32cc
|
[
"MIT"
] | 27
|
2015-06-22T19:46:22.000Z
|
2021-06-21T11:07:59.000Z
|
fantasy_sport/__init__.py
|
josuebrunel/yfs
|
42b2862ac76dbe66ed3d92469bab839419cf32cc
|
[
"MIT"
] | 38
|
2015-06-22T18:40:55.000Z
|
2018-05-29T14:39:01.000Z
|
fantasy_sport/__init__.py
|
josuebrunel/yfs
|
42b2862ac76dbe66ed3d92469bab839419cf32cc
|
[
"MIT"
] | 14
|
2015-06-27T03:45:29.000Z
|
2020-06-15T14:37:07.000Z
|
from __future__ import absolute_import
from fantasy_sport.fantasy_sport import FantasySport
from fantasy_sport.roster import Player, Roster
from fantasy_sport import utils
| 29
| 52
| 0.873563
| 24
| 174
| 5.958333
| 0.416667
| 0.335664
| 0.335664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109195
| 174
| 5
| 53
| 34.8
| 0.922581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8724784a5279497df3902628f9077a84457c8d84
| 26,569
|
py
|
Python
|
iter_tasks/scripts/old/assembly_task_generation.py
|
Wisc-HCI/ITER
|
2ae8a5f0ae17783db4db25198ec0d97e72cd7296
|
[
"MIT"
] | 1
|
2021-04-07T15:54:44.000Z
|
2021-04-07T15:54:44.000Z
|
iter_tasks/scripts/old/assembly_task_generation.py
|
Wisc-HCI/ITER
|
2ae8a5f0ae17783db4db25198ec0d97e72cd7296
|
[
"MIT"
] | null | null | null |
iter_tasks/scripts/old/assembly_task_generation.py
|
Wisc-HCI/ITER
|
2ae8a5f0ae17783db4db25198ec0d97e72cd7296
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Generate assembly-task plans from a per-robot JSON config.
import sys
import copy
import json
import math

# Lego-style block footprints as (width, length, height) in metres; the
# commented value is the full (unstacked) height.
BLOCK_1x4 = (0.031, 0.126, 0.018)  # 0.038 full
BLOCK_1x3 = (0.031, 0.095, 0.036)  # 0.058 full
BLOCK_1x1 = (0.031, 0.031, 0.036)  # 0.058 full

if len(sys.argv) != 2:
    # print() with one argument is valid in both Python 2 and 3; the original
    # used a Python-2-only print statement.
    print('must supply the robot config file to use')
    exit()

configFileName = sys.argv[1]
config = json.load(open('./configs/assembly/' + configFileName + '.json', 'r'))

# Robot/task parameters pulled from the config file.
SAFE_HEIGHT = config['safe_height']
GRASP_OFFSET = config['grasp_offset']
GRASP_EFFORT = config['grasp_effort']
RELEASE_EFFORT = config['release_effort']
NUM_ITERATIONS = config['num_iterations']
WORKSPACE_POSITION = config['workspace_position']
HOME_POSITION = config['home_position']
DOWN_GY_ORIENTATION = config['down_gy_orientation']
DOWN_GX_ORIENTATION = config['down_gx_orientation']
SPACING = config['block_spacing']

# Optional table model. NOTE(review): the if-body indentation was missing in
# the original paste; both assignments are assumed to belong to the 'if'.
USE_TABLE = False
if 'table' in config:
    USE_TABLE = True
    TABLE = config['table']
class Queue:
    """A straight row of identical blocks the robot consumes in order.

    Items are laid out along the x or y axis (``mode``) starting at
    ``origin_position`` and are picked one at a time with
    :meth:`get_next`.  Reads the module-level configuration globals
    DOWN_GX_ORIENTATION, SAFE_HEIGHT, GRASP_OFFSET and GRASP_EFFORT.
    """

    def __init__(self, origin_position, orientation, num_items, item_dimensions, spacing, name_unique='', offset_z=True, mode='x'):
        """Create a queue of ``num_items`` blocks.

        origin_position -- dict with 'x'/'y'/'z' of the row's corner
        orientation     -- 'HORIZONTAL_LEFT' or 'HORIZONTAL_RIGHT'
                           (direction the row extends along its axis)
        item_dimensions -- (x, y, z) size of one block in meters
        spacing         -- gap between consecutive blocks
        name_unique     -- suffix to disambiguate queues of equal dims
        offset_z        -- grasp at half block height when True
        mode            -- axis the row extends along: 'x' or 'y'
        """
        # Validate first so an invalid enum leaves no half-built object.
        if orientation not in ('HORIZONTAL_LEFT', 'HORIZONTAL_RIGHT'):
            raise Exception('Invalid Orientation Enum')
        self.origin_position = origin_position
        self.orientation = orientation
        self.number_of_items = num_items
        self.idims = item_dimensions
        self.spacing = spacing
        self._index = 0
        self.offset_z = offset_z
        self.mode = mode
        self.name = ('queue' + str(item_dimensions[0])
                     + 'x' + str(item_dimensions[1])
                     + 'x' + str(item_dimensions[2]) + name_unique)

    def _get(self, index):
        """Return (position, orientation) for grasping item *index*."""
        # Distance of the item's center from the row origin along the
        # row axis; HORIZONTAL_LEFT extends in the negative direction.
        along = index * (self.spacing + self.idims[0]) + 0.5 * self.idims[0]
        if self.orientation == 'HORIZONTAL_LEFT':
            along = -along
        z = self.origin_position['z'] + self.idims[2] * (0.5 if self.offset_z else 1)
        if self.mode == 'x':
            target_position = {
                'x': self.origin_position['x'] + along,
                'y': self.origin_position['y'] + 0.5 * self.idims[1],
                'z': z
            }
        elif self.mode == 'y':
            target_position = {
                'x': self.origin_position['x'] + 0.5 * self.idims[1],
                'y': self.origin_position['y'] + along,
                'z': z
            }
        else:
            # NOTE(review): unknown modes silently fall back to the world
            # origin -- kept for compatibility, but likely a config error.
            target_position = {'x': 0, 'y': 0, 'z': 0}
        return target_position, copy.deepcopy(DOWN_GX_ORIENTATION)

    def get_next(self):
        """Pop the next item: returns (task_list, object_name).

        The task list hovers above the item, descends, attaches it to
        the MoveIt planning scene, grasps it and lifts back up.
        Raises Exception when the queue is exhausted.
        """
        # Bounds check before doing any work on the item.
        if self._index >= self.number_of_items:
            raise Exception('There are no more items in this queue')
        obj_id = self.name + '_' + str(self._index)
        target_position, target_orientation = self._get(self._index)
        hover = {
            'name': 'move',
            'position': {
                'x': target_position['x'],
                'y': target_position['y'],
                # 0.055 m of extra clearance above the grasp pose
                'z': target_position['z'] + GRASP_OFFSET + 0.055
            },
            'orientation': target_orientation
        }
        # Each move task gets its OWN orientation dict (deepcopy below):
        # the degrees->radians pass in __main__ mutates t['orientation']
        # in place, and a shared dict would be converted multiple times.
        task_list = [
            # move from current position to above the queue item
            hover,
            # descend onto the item
            {
                'name': 'move',
                'position': {
                    'x': target_position['x'],
                    'y': target_position['y'],
                    'z': target_position['z'] + GRASP_OFFSET
                },
                'orientation': copy.deepcopy(target_orientation)
            },
            # attach the item to the MoveIt planning scene
            {
                'name': 'connect_object',
                'object_name': obj_id
            },
            # close the gripper
            {
                'name': 'grasp',
                'effort': GRASP_EFFORT
            },
            # lift back up to the hover pose
            copy.deepcopy(hover)
        ]
        self._index += 1
        return task_list, obj_id

    def env_list(self):
        """Return planning-scene box descriptions for every queued item."""
        obj_list = []
        for index in range(self.number_of_items):
            target_position, _ = self._get(index)
            # NOTE(review): scales the ABSOLUTE z by 0.5 when offset_z is
            # False; this looks like it was meant to undo only the grasp
            # height offset -- confirm against the rendered scene.
            target_position['z'] = target_position['z'] * (1 if self.offset_z else 0.5)
            obj_list.append({
                'name': self.name + '_' + str(index),
                'representation': 'box',
                'position': target_position,
                'orientation': copy.deepcopy(DOWN_GX_ORIENTATION),
                'size': {
                    'x': self.idims[0],
                    'y': self.idims[1],
                    'z': self.idims[2]
                }
            })
        return obj_list
class AssemblyTask:
    """Generates the pick-and-place primitive list for the block assembly.

    All placement targets are expressed relative to the module-level
    WORKSPACE_POSITION; block sizes come from the BLOCK_* constants.
    Every individual placement follows the same five-step sequence, so
    it is factored into the private :meth:`_place_block` helper.
    """

    def home_position(self):
        """Return a move primitive that sends the arm to the home pose."""
        return {
            'name': 'move',
            'position': copy.deepcopy(HOME_POSITION),
            'orientation': copy.deepcopy(DOWN_GX_ORIENTATION)
        }

    def wait_for_human(self):
        """Return a wait primitive that blocks until the button is pressed."""
        return {
            'name': 'wait',
            'condition': 'button'
        }

    def _place_block(self, queue, x, y, z, orientation):
        """Pick the next item from *queue* and place it at absolute (x, y, z).

        Sequence: pick from the queue, hover above the target at
        SAFE_HEIGHT, descend to the placement height *z*, open the
        gripper, detach the object from the planning scene, retreat
        back to the safe height.  Orientation dicts are deep-copied per
        move so the later degrees->radians pass mutates each one once.
        """
        task_list, obj_id = queue.get_next()
        # hover above the target at the configured safe height
        task_list.append({
            'name': 'move',
            'position': {
                'x': x,
                'y': y,
                'z': WORKSPACE_POSITION['z'] + SAFE_HEIGHT
            },
            'orientation': copy.deepcopy(orientation)
        })
        # descend to the placement height
        task_list.append({
            'name': 'move',
            'position': {
                'x': x,
                'y': y,
                'z': z
            },
            'orientation': copy.deepcopy(orientation)
        })
        # open the gripper
        task_list.append({
            'name': 'release',
            'effort': RELEASE_EFFORT
        })
        # detach from the MoveIt planning scene
        task_list.append({
            'name': 'disconnect_object',
            'object_name': obj_id
        })
        # retreat back to the safe height
        task_list.append({
            'name': 'move',
            'position': {
                'x': x,
                'y': y,
                'z': WORKSPACE_POSITION['z'] + SAFE_HEIGHT
            },
            'orientation': copy.deepcopy(orientation)
        })
        return task_list

    def build_base(self, queue_b4x1, queue_b3x1):
        """Place the two 1x3 and two 1x4 blocks forming the base layer."""
        task_list = [{
            'name': 'logger',
            'msg': 'Task Progress: Building Base'
        }]
        # placement heights (absolute z) for this layer
        z_3x1 = WORKSPACE_POSITION['z'] + BLOCK_1x3[2] * 0.5 + GRASP_OFFSET
        z_4x1 = WORKSPACE_POSITION['z'] + BLOCK_1x4[2] * 0.5 + GRASP_OFFSET + BLOCK_1x3[2] + 0.02
        # Base 3x1 - 1
        task_list += self._place_block(
            queue_b3x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x3[0] * 0.5 + 0.005,
            WORKSPACE_POSITION['y'] + BLOCK_1x3[1] * 0.5,
            z_3x1, DOWN_GY_ORIENTATION)
        # Base 3x1 - 2
        task_list += self._place_block(
            queue_b3x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x1[0] * 3.5,
            WORKSPACE_POSITION['y'] + BLOCK_1x3[1] * 0.5,
            z_3x1, DOWN_GY_ORIENTATION)
        # Base 4x1 - 1 (laid across the two 3x1 blocks)
        task_list += self._place_block(
            queue_b4x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x4[1] * 0.5 - 0.005,
            WORKSPACE_POSITION['y'] + BLOCK_1x1[1] * 0.5,
            z_4x1, DOWN_GX_ORIENTATION)
        # Base 4x1 - 2
        task_list += self._place_block(
            queue_b4x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x4[1] * 0.5,
            WORKSPACE_POSITION['y'] + BLOCK_1x1[1] * 2.5,
            z_4x1, DOWN_GX_ORIENTATION)
        return task_list

    def build_pillars(self, queue_b1x1):
        """Place the four 1x1 pillar blocks on top of the base layer."""
        task_list = [{
            'name': 'logger',
            'msg': '\nTask Progress: Building Pillars Layer\n'
        }]
        z_1x1 = WORKSPACE_POSITION['z'] + BLOCK_1x1[2] * 0.5 + GRASP_OFFSET + BLOCK_1x3[2] + BLOCK_1x4[2] + 0.002
        # one pillar near each corner of the base, in the original order
        for mx, my in ((0.5, 0.5), (3.5, 0.5), (3.5, 2.5), (0.5, 2.5)):
            task_list += self._place_block(
                queue_b1x1,
                WORKSPACE_POSITION['x'] + BLOCK_1x1[0] * mx,
                WORKSPACE_POSITION['y'] + BLOCK_1x1[1] * my,
                z_1x1, DOWN_GY_ORIENTATION)
        return task_list

    def build_top(self, queue_b4x1, queue_b3x1):
        """Place the two 1x4 and two 1x3 blocks forming the top layers."""
        task_list = [{
            'name': 'logger',
            'msg': 'Task Progress: Building Top'
        }]
        # placement heights (absolute z) above base + pillar layers
        z_4x1 = WORKSPACE_POSITION['z'] + BLOCK_1x4[2] * 0.5 + GRASP_OFFSET + BLOCK_1x1[2] + BLOCK_1x3[2] + BLOCK_1x4[2] + 0.022
        z_3x1 = WORKSPACE_POSITION['z'] + BLOCK_1x3[2] * 0.5 + GRASP_OFFSET + BLOCK_1x1[2] + BLOCK_1x3[2] + 2 * BLOCK_1x4[2] + 0.004
        # block 4x1 - 1
        task_list += self._place_block(
            queue_b4x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x4[1] * 0.5,
            WORKSPACE_POSITION['y'] + BLOCK_1x1[1] * 0.5,
            z_4x1, DOWN_GX_ORIENTATION)
        # block 4x1 - 2
        task_list += self._place_block(
            queue_b4x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x4[1] * 0.5,
            WORKSPACE_POSITION['y'] + BLOCK_1x1[1] * 2.5,
            z_4x1, DOWN_GX_ORIENTATION)
        # block 3x1 - 1
        task_list += self._place_block(
            queue_b3x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x3[0] * 0.5,
            WORKSPACE_POSITION['y'] + BLOCK_1x3[1] * 0.5,
            z_3x1, DOWN_GY_ORIENTATION)
        # block 3x1 - 2
        task_list += self._place_block(
            queue_b3x1,
            WORKSPACE_POSITION['x'] + BLOCK_1x1[0] * 3.5,
            WORKSPACE_POSITION['y'] + BLOCK_1x3[1] * 0.5,
            z_3x1, DOWN_GY_ORIENTATION)
        return task_list

    def generate(self, queue_b4x1, queue_b3x1, queue_b1x1_1):
        """Produce the full task list for NUM_ITERATIONS assembly rounds."""
        task_list = [self.home_position()]
        # start with an open gripper
        task_list.append({
            'name': 'release',
            'effort': RELEASE_EFFORT
        })
        for i in range(NUM_ITERATIONS):
            task_list.append({
                'name': 'logger',
                'msg': 'Task Iteration = ' + str(i)
            })
            task_list += self.build_base(queue_b4x1, queue_b3x1)
            # pillar and top layers are currently disabled
            #task_list += self.build_pillars(queue_b1x1_1)
            #task_list += self.build_top(queue_b4x1,queue_b3x1)
            task_list.append(self.home_position())
            task_list.append(self.wait_for_human())
        return task_list
if __name__ == "__main__":
    # Build one Queue (4 blocks each) per entry in the config file.
    QUEUES = {}
    for q in config['queues']:
        if q['name'] == 'queue_b4x1':
            QUEUES[q['name']] = Queue(q['position'],'HORIZONTAL_LEFT',4,BLOCK_1x4,SPACING,offset_z=False,mode=q['mode'])
        elif q['name'] == 'queue_b3x1':
            QUEUES[q['name']] = Queue(q['position'],'HORIZONTAL_LEFT',4,BLOCK_1x3,SPACING,offset_z=False,mode=q['mode'])
        elif q['name'] == 'queue_b1x1':
            QUEUES[q['name']] = Queue(q['position'],'HORIZONTAL_LEFT',4,BLOCK_1x1,SPACING,mode=q['mode'])
    taskGen = AssemblyTask()
    task_list = taskGen.generate(
        QUEUES['queue_b4x1'],
        QUEUES['queue_b3x1'],
        QUEUES['queue_b1x1'])
    # Convert Euler orientations (no 'w' key) from degrees to radians;
    # quaternion orientations (with 'w') are left untouched.
    for t in task_list:
        if t['name'] == 'move' and 'w' not in t['orientation']:
            for axis in ('x', 'y', 'z'):
                t['orientation'][axis] = t['orientation'][axis] / 180.0 * math.pi
    # Collision/visualization objects for the planning environment.
    env_list = []
    env_list += QUEUES['queue_b4x1'].env_list()
    env_list += QUEUES['queue_b3x1'].env_list()
    env_list += QUEUES['queue_b1x1'].env_list()
    if USE_TABLE:
        env_list.append({
            'name': 'tabletop',
            'representation': 'box',
            'position': TABLE['position'],
            'orientation': {
                'x': 0,
                'y': 0,
                'z': 0,
                'w': 1
            },
            'size': TABLE['size']
        })
    task = {
        'task': task_list,
        'environment': env_list
    }
    # Save the final plan; `with` guarantees the file is flushed and
    # closed (previously the handle was left open).
    with open('../plans/test.json', 'w') as f:
        json.dump(task, f, indent=4)
| 35.097754
| 137
| 0.488615
| 2,838
| 26,569
| 4.330867
| 0.067653
| 0.152144
| 0.077455
| 0.095192
| 0.785697
| 0.771377
| 0.740379
| 0.720283
| 0.714995
| 0.707835
| 0
| 0.046732
| 0.365351
| 26,569
| 756
| 138
| 35.14418
| 0.682185
| 0.021152
| 0
| 0.709581
| 0
| 0
| 0.114434
| 0
| 0
| 0
| 0
| 0.001323
| 0
| 0
| null | null | 0.001497
| 0.005988
| null | null | 0.001497
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87629ba504c4bc8da56a3f9811434a31e440ab17
| 4,642
|
py
|
Python
|
quots/migrations/0001_initial.py
|
GSByeon/openhgsenti
|
d7806f58c81127d32091d9875a99ac13aef94a8a
|
[
"Apache-2.0"
] | 29
|
2018-05-29T06:47:34.000Z
|
2022-02-22T04:38:53.000Z
|
quots/migrations/0001_initial.py
|
GSByeon/openhgsenti
|
d7806f58c81127d32091d9875a99ac13aef94a8a
|
[
"Apache-2.0"
] | 2
|
2018-08-28T08:02:14.000Z
|
2018-11-26T08:19:16.000Z
|
quots/migrations/0001_initial.py
|
drexly/openhgsenti
|
d7806f58c81127d32091d9875a99ac13aef94a8a
|
[
"Apache-2.0"
] | 11
|
2018-06-26T00:47:52.000Z
|
2020-12-22T14:14:18.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-03-16 16:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial, auto-generated schema for this app.

    Creates the ``In``/``Out`` request-and-answer models (each with up
    to seven picture slots), the ``Inner``/``Outer`` counter models,
    and the foreign keys wiring them together.  Generated by Django;
    edit via new migrations rather than by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Incoming request ("jumun") with timestamps and picture slots.
        migrations.CreateModel(
            name='In',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('jumun', models.CharField(max_length=200)),
                ('type', models.CharField(max_length=200)),
                ('outnums', models.IntegerField(default=0)),
                ('reg_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Registered Date')),
                ('ans_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Selected Date')),
                ('content', models.CharField(max_length=200)),
                ('pic0', models.CharField(max_length=200)),
                ('pic1', models.CharField(max_length=200)),
                ('pic2', models.CharField(max_length=200)),
                ('pic3', models.CharField(max_length=200)),
                ('pic4', models.CharField(max_length=200)),
                ('pic5', models.CharField(max_length=200)),
                ('pic6', models.CharField(max_length=200)),
            ],
        ),
        # Per-type counters for incoming items.
        migrations.CreateModel(
            name='Inner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=200)),
                ('nums', models.IntegerField(default=0)),
                ('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Registered Date')),
                ('update', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Latest UpDate')),
                ('newnums', models.IntegerField(default=0)),
            ],
        ),
        # Answer ("dapbyun") to an In, with its own picture slots.
        migrations.CreateModel(
            name='Out',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dapbyun', models.CharField(max_length=200)),
                ('inflag', models.BooleanField(default=False)),
                ('reg_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Registered Date')),
                ('sel_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Selected Date')),
                ('content', models.CharField(max_length=200)),
                ('pic0', models.CharField(max_length=200)),
                ('pic1', models.CharField(max_length=200)),
                ('pic2', models.CharField(max_length=200)),
                ('pic3', models.CharField(max_length=200)),
                ('pic4', models.CharField(max_length=200)),
                ('pic5', models.CharField(max_length=200)),
                ('pic6', models.CharField(max_length=200)),
            ],
        ),
        # Per-type counters for outgoing items (with credits).
        migrations.CreateModel(
            name='Outer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=200)),
                ('nums', models.IntegerField(default=0)),
                ('credits', models.IntegerField(default=0)),
                ('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Registered Date')),
                ('update', models.DateTimeField(default=django.utils.timezone.now, verbose_name=b'Latest UpDate')),
                ('newnums', models.IntegerField(default=0)),
            ],
        ),
        # Foreign keys are added after model creation to break the
        # In <-> Out/Outer dependency cycle.
        migrations.AddField(
            model_name='out',
            name='handler',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quots.Outer'),
        ),
        migrations.AddField(
            model_name='out',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quots.In'),
        ),
        migrations.AddField(
            model_name='in',
            name='answered',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='quots.Outer'),
        ),
        migrations.AddField(
            model_name='in',
            name='orderer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quots.Inner'),
        ),
    ]
| 46.888889
| 119
| 0.579492
| 469
| 4,642
| 5.61194
| 0.200426
| 0.119681
| 0.143617
| 0.191489
| 0.830547
| 0.81003
| 0.771657
| 0.771657
| 0.771657
| 0.771657
| 0
| 0.029691
| 0.274451
| 4,642
| 98
| 120
| 47.367347
| 0.751781
| 0.014649
| 0
| 0.677778
| 1
| 0
| 0.089696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044444
| 0
| 0.088889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5e48ea84df740e6a54de0548ea14302f1b82b9ba
| 20,799
|
py
|
Python
|
llvm/tests/test_struct_args.py
|
KennethNielsen/llvmpy
|
70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e
|
[
"BSD-3-Clause"
] | 140
|
2015-01-07T20:58:12.000Z
|
2022-01-21T17:02:21.000Z
|
llvm/tests/test_struct_args.py
|
KennethNielsen/llvmpy
|
70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e
|
[
"BSD-3-Clause"
] | 19
|
2015-01-15T14:45:49.000Z
|
2020-09-04T14:58:23.000Z
|
llvm/tests/test_struct_args.py
|
KennethNielsen/llvmpy
|
70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e
|
[
"BSD-3-Clause"
] | 12
|
2015-01-12T01:49:32.000Z
|
2020-07-10T22:30:38.000Z
|
from __future__ import print_function
from . import tests
import sys
import unittest
from ctypes import Structure, c_float, c_double, c_uint8, CFUNCTYPE
from llvm import core as lc
from llvm import ee as le
from .support import (skip_if_win32, skip_if_not_win32, skip_if_not_32bits,
skip_if_not_64bits, skip_if_not_intel_cpu, TestCase)
class TwoDoubleOneByte(Structure):
    """ctypes mirror of ``struct { double x; double y; uint8_t z; }``."""
    _fields_ = (('x', c_double), ('y', c_double), ('z', c_uint8))

    def __repr__(self):
        # same layout as '%f %f %d' formatting
        return '<x={0:f} y={1:f} z={2:d}>'.format(self.x, self.y, self.z)
class TwoDouble(Structure):
    """ctypes mirror of ``struct { double x; double y; }``."""
    _fields_ = (('x', c_double), ('y', c_double))

    def __repr__(self):
        # same layout as '%f %f' formatting
        return '<x={0:f} y={1:f}>'.format(self.x, self.y)
class TwoFloat(Structure):
    """ctypes mirror of ``struct { float x; float y; }``."""
    _fields_ = (('x', c_float), ('y', c_float))

    def __repr__(self):
        # same layout as '%f %f' formatting
        return '<x={0:f} y={1:f}>'.format(self.x, self.y)
class OneByte(Structure):
    """ctypes mirror of ``struct { uint8_t x; }``."""
    _fields_ = [('x', c_uint8)]

    def __repr__(self):
        # same layout as '%d' formatting
        return '<x={0:d}>'.format(self.x)
@skip_if_not_intel_cpu
@skip_if_win32
class TestStructSystemVABI(TestCase):
    '''
    Non microsoft convention

    JIT-compiles small LLVM functions that take and return structs, then
    calls them through ctypes and checks the results, exercising how the
    System V calling convention passes structs of various sizes.
    '''
    #----------------------------------------------------------------------
    # 64 bits
    @skip_if_not_64bits
    def test_bigger_than_two_words_64(self):
        '''Struct of (double, double, uint8) is passed and returned
        indirectly: arg 0 is an sret pointer for the return value and
        arg 1 is a byval pointer to the caller's copy.
        '''
        m = lc.Module.new('test_struct_arg')
        double_type = lc.Type.double()
        uint8_type = lc.Type.int(8)
        struct_type = lc.Type.struct([double_type, double_type, uint8_type])
        struct_ptr_type = lc.Type.pointer(struct_type)
        func_type = lc.Type.function(lc.Type.void(),
                                     [struct_ptr_type, struct_ptr_type])
        func = m.add_function(func_type, name='foo')
        # return value pointer
        func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
        # pass structure by value
        func.args[1].add_attribute(lc.ATTR_BY_VAL)
        # define function body: out = (x*y, x/y, z)
        builder = lc.Builder.new(func.append_basic_block(''))
        arg = builder.load(func.args[1])
        e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
        se1 = builder.fmul(e1, e2)
        se2 = builder.fdiv(e1, e2)
        ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
        ret = builder.insert_value(ret, se2, 1)
        ret = builder.insert_value(ret, e3, 2)
        builder.store(ret, func.args[0])
        builder.ret_void()
        del builder
        # verify
        m.verify()
        print(m)
        # use with ctypes
        engine = le.EngineBuilder.new(m).create()
        ptr = engine.get_pointer_to_function(func)
        cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
        cfunc = cfunctype(ptr)
        arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
        ret = cfunc(arg)
        print(arg)
        print(ret)
        self.assertClose(arg.x * arg.y, ret.x)
        self.assertClose(arg.x / arg.y, ret.y)
        self.assertEqual(arg.z, ret.z)
    @skip_if_not_64bits
    def test_just_two_words_64(self):
        '''Struct of exactly two doubles is passed and returned directly
        by value (no sret/byval pointers needed).
        '''
        m = lc.Module.new('test_struct_arg')
        double_type = lc.Type.double()
        struct_type = lc.Type.struct([double_type, double_type])
        func_type = lc.Type.function(struct_type, [struct_type])
        func = m.add_function(func_type, name='foo')
        # define function body: out = (x*y, x/y)
        builder = lc.Builder.new(func.append_basic_block(''))
        arg = func.args[0]
        e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
        se1 = builder.fmul(e1, e2)
        se2 = builder.fdiv(e1, e2)
        ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
        ret = builder.insert_value(ret, se2, 1)
        builder.ret(ret)
        del builder
        # verify
        m.verify()
        print(m)
        # use with ctypes
        engine = le.EngineBuilder.new(m).create()
        ptr = engine.get_pointer_to_function(func)
        cfunctype = CFUNCTYPE(TwoDouble, TwoDouble)
        cfunc = cfunctype(ptr)
        arg = TwoDouble(x=1.321321, y=6.54352)
        ret = cfunc(arg)
        print(arg)
        print(ret)
        self.assertClose(arg.x * arg.y, ret.x)
        self.assertClose(arg.x / arg.y, ret.y)
    @skip_if_not_64bits
    def test_two_halfwords(self):
        '''Arguments smaller or equal to a word is packed into a word.
        Passing as struct { float, float } occupies two XMM registers instead
        of one.
        The output must be in XMM.
        '''
        m = lc.Module.new('test_struct_arg')
        float_type = lc.Type.float()
        # modeled as a <2 x float> vector so both floats share one register
        struct_type = lc.Type.vector(float_type, 2)
        func_type = lc.Type.function(struct_type, [struct_type])
        func = m.add_function(func_type, name='foo')
        # define function body: out = (x*y, x/y)
        builder = lc.Builder.new(func.append_basic_block(''))
        arg = func.args[0]
        constint = lambda x: lc.Constant.int(lc.Type.int(), x)
        e1, e2 = [builder.extract_element(arg, constint(i))
                  for i in range(2)]
        se1 = builder.fmul(e1, e2)
        se2 = builder.fdiv(e1, e2)
        ret = builder.insert_element(lc.Constant.undef(struct_type), se1,
                                     constint(0))
        ret = builder.insert_element(ret, se2, constint(1))
        builder.ret(ret)
        del builder
        # verify
        m.verify()
        print(m)
        # use with ctypes
        engine = le.EngineBuilder.new(m).create()
        ptr = engine.get_pointer_to_function(func)
        cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
        cfunc = cfunctype(ptr)
        arg = TwoFloat(x=1.321321, y=6.54352)
        ret = cfunc(arg)
        print(arg)
        print(ret)
        self.assertClose(arg.x * arg.y, ret.x)
        self.assertClose(arg.x / arg.y, ret.y)
    #----------------------------------------------------------------------
    # 32 bits
    @skip_if_not_32bits
    def test_structure_abi_32_1(self):
        '''x86 is simple. Always pass structure as memory.
        '''
        m = lc.Module.new('test_struct_arg')
        double_type = lc.Type.double()
        uint8_type = lc.Type.int(8)
        struct_type = lc.Type.struct([double_type, double_type, uint8_type])
        struct_ptr_type = lc.Type.pointer(struct_type)
        func_type = lc.Type.function(lc.Type.void(),
                                     [struct_ptr_type, struct_ptr_type])
        func = m.add_function(func_type, name='foo')
        # return value pointer
        func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
        # pass structure by value
        func.args[1].add_attribute(lc.ATTR_BY_VAL)
        # define function body: out = (x*y, x/y, z)
        builder = lc.Builder.new(func.append_basic_block(''))
        arg = builder.load(func.args[1])
        e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
        se1 = builder.fmul(e1, e2)
        se2 = builder.fdiv(e1, e2)
        ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
        ret = builder.insert_value(ret, se2, 1)
        ret = builder.insert_value(ret, e3, 2)
        builder.store(ret, func.args[0])
        builder.ret_void()
        del builder
        # verify
        m.verify()
        print(m)
        # use with ctypes
        engine = le.EngineBuilder.new(m).create()
        ptr = engine.get_pointer_to_function(func)
        cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
        cfunc = cfunctype(ptr)
        arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
        ret = cfunc(arg)
        print(arg)
        print(ret)
        self.assertClose(arg.x * arg.y, ret.x)
        self.assertClose(arg.x / arg.y, ret.y)
        self.assertEqual(arg.z, ret.z)
    @skip_if_not_32bits
    def test_structure_abi_32_2(self):
        '''x86 is simple. Always pass structure as memory.
        '''
        m = lc.Module.new('test_struct_arg')
        float_type = lc.Type.float()
        struct_type = lc.Type.struct([float_type, float_type])
        struct_ptr_type = lc.Type.pointer(struct_type)
        func_type = lc.Type.function(lc.Type.void(),
                                     [struct_ptr_type, struct_ptr_type])
        func = m.add_function(func_type, name='foo')
        # return value pointer
        func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
        # pass structure by value
        func.args[1].add_attribute(lc.ATTR_BY_VAL)
        # define function body: out = (x*y, x/y)
        builder = lc.Builder.new(func.append_basic_block(''))
        arg = builder.load(func.args[1])
        e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
        se1 = builder.fmul(e1, e2)
        se2 = builder.fdiv(e1, e2)
        ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
        ret = builder.insert_value(ret, se2, 1)
        builder.store(ret, func.args[0])
        builder.ret_void()
        del builder
        # verify
        m.verify()
        print(m)
        # use with ctypes
        engine = le.EngineBuilder.new(m).create()
        ptr = engine.get_pointer_to_function(func)
        cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
        cfunc = cfunctype(ptr)
        arg = TwoFloat(x=1.321321, y=6.54352)
        ret = cfunc(arg)
        print(arg)
        print(ret)
        self.assertClose(arg.x * arg.y, ret.x)
        self.assertClose(arg.x / arg.y, ret.y)
    @skip_if_not_32bits
    def test_structure_abi_32_3(self):
        '''x86 is simple. Always pass structure as memory.
        '''
        m = lc.Module.new('test_struct_arg')
        uint8_type = lc.Type.int(8)
        struct_type = lc.Type.struct([uint8_type])
        struct_ptr_type = lc.Type.pointer(struct_type)
        func_type = lc.Type.function(lc.Type.void(),
                                     [struct_ptr_type, struct_ptr_type])
        func = m.add_function(func_type, name='foo')
        # return value pointer
        func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
        # pass structure by value
        func.args[1].add_attribute(lc.ATTR_BY_VAL)
        # define function body: out = (x*x,)
        builder = lc.Builder.new(func.append_basic_block(''))
        arg = builder.load(func.args[1])
        e1 = builder.extract_value(arg, 0)
        se1 = builder.mul(e1, e1)
        ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
        builder.store(ret, func.args[0])
        builder.ret_void()
        del builder
        # verify
        m.verify()
        print(m)
        # use with ctypes
        engine = le.EngineBuilder.new(m).create()
        ptr = engine.get_pointer_to_function(func)
        cfunctype = CFUNCTYPE(OneByte, OneByte)
        cfunc = cfunctype(ptr)
        arg = OneByte(x=8)
        ret = cfunc(arg)
        print(arg)
        print(ret)
        self.assertEqual(arg.x * arg.x, ret.x)
# register the suite with the package-level test collector
tests.append(TestStructSystemVABI)
@skip_if_not_intel_cpu
@skip_if_not_win32
class TestStructMicrosoftABI(TestCase):
'''
Microsoft convention
'''
#----------------------------------------------------------------------
# 64 bits
@skip_if_not_64bits
def test_bigger_than_two_words_64(self):
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([double_type, double_type, uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
ret = builder.insert_value(ret, e3, 2)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
cfunc = cfunctype(ptr)
arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
self.assertEqual(arg.z, ret.z)
@skip_if_not_64bits
def test_just_two_words_64(self):
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
struct_type = lc.Type.struct([double_type, double_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDouble, TwoDouble)
cfunc = cfunctype(ptr)
arg = TwoDouble(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
@skip_if_not_64bits
def test_two_halfwords(self):
'''Arguments smaller or equal to a word is packed into a word.
Floats structure are not passed on the XMM.
Treat it as a i64.
'''
m = lc.Module.new('test_struct_arg')
float_type = lc.Type.float()
struct_type = lc.Type.struct([float_type, float_type])
abi_type = lc.Type.int(64)
func_type = lc.Type.function(abi_type, [abi_type])
func = m.add_function(func_type, name='foo')
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = func.args[0]
struct_ptr = builder.alloca(struct_type)
struct_int_ptr = builder.bitcast(struct_ptr, lc.Type.pointer(abi_type))
builder.store(arg, struct_int_ptr)
arg = builder.load(struct_ptr)
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.store(ret, struct_ptr)
ret = builder.load(struct_int_ptr)
builder.ret(ret)
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
cfunc = cfunctype(ptr)
arg = TwoFloat(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
#----------------------------------------------------------------------
# 32 bits
@skip_if_not_32bits
def test_one_word_register(self):
'''Argument is passed by memory.
Return value is passed by register.
'''
m = lc.Module.new('test_struct_arg')
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(struct_type, [struct_ptr_type])
func = m.add_function(func_type, name='foo')
# pass structure by value
func.args[0].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[0])
e1 = builder.extract_value(arg, 0)
se1 = builder.mul(e1, e1)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
builder.ret(ret)
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(OneByte, OneByte)
cfunc = cfunctype(ptr)
arg = OneByte(x=8)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertEqual(arg.x * arg.x, ret.x)
@skip_if_not_32bits
def test_two_floats(self):
'''Argument is passed by register.
Return in 2 registers
'''
m = lc.Module.new('test_struct_arg')
float_type = lc.Type.float()
struct_type = lc.Type.struct([float_type, float_type])
abi_type = lc.Type.int(64)
func_type = lc.Type.function(abi_type, [struct_type])
func = m.add_function(func_type, name='foo')
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
out_ptr = builder.alloca(struct_type)
arg = func.args[0]
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.store(ret, out_ptr)
out_int_ptr = builder.bitcast(out_ptr, lc.Type.pointer(abi_type))
builder.ret(builder.load(out_int_ptr))
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
cfunc = cfunctype(ptr)
arg = TwoFloat(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
@skip_if_not_32bits
def test_bigger_than_two_words(self):
'''Pass in memory.
'''
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([double_type, double_type, uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
ret = builder.insert_value(ret, e3, 2)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
cfunc = cfunctype(ptr)
arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
self.assertEqual(arg.z, ret.z)
tests.append(TestStructMicrosoftABI)
if __name__ == "__main__":
unittest.main()
| 30.319242
| 79
| 0.589499
| 2,778
| 20,799
| 4.227862
| 0.061195
| 0.030651
| 0.042571
| 0.042912
| 0.908216
| 0.898595
| 0.891103
| 0.878757
| 0.87169
| 0.866496
| 0
| 0.028713
| 0.279965
| 20,799
| 685
| 80
| 30.363504
| 0.755542
| 0.088177
| 0
| 0.871495
| 0
| 0
| 0.014725
| 0
| 0
| 0
| 0
| 0
| 0.060748
| 1
| 0.037383
| false
| 0
| 0.018692
| 0.009346
| 0.088785
| 0.086449
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e58bf24cd7669b7cd981a4fd9bd9e971534e156
| 141
|
py
|
Python
|
dpp_nets/dpp/__init__.py
|
mbp28/dpp_nets
|
86859b7612433cc6349b427b47c54986224e702a
|
[
"MIT"
] | 1
|
2021-06-05T11:14:13.000Z
|
2021-06-05T11:14:13.000Z
|
dpp_nets/dpp/__init__.py
|
mbp28/dpp_nets
|
86859b7612433cc6349b427b47c54986224e702a
|
[
"MIT"
] | null | null | null |
dpp_nets/dpp/__init__.py
|
mbp28/dpp_nets
|
86859b7612433cc6349b427b47c54986224e702a
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.linalg import orth
from dpp_nets.dpp.sample_dpp import sample_dpp
from dpp_nets.dpp.score_dpp import score_dpp
| 28.2
| 46
| 0.851064
| 27
| 141
| 4.222222
| 0.444444
| 0.122807
| 0.192982
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113475
| 141
| 5
| 47
| 28.2
| 0.912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5ed08ea65b219d304b654596ea05bb99759d3c12
| 227
|
py
|
Python
|
infinitystone/ui/views/__init__.py
|
HieronymusCrouse/infinitystone
|
8a4bd9b415c0b2267eba45efc04a00d891b1a8d8
|
[
"BSD-3-Clause"
] | 1
|
2018-05-17T15:50:45.000Z
|
2018-05-17T15:50:45.000Z
|
infinitystone/ui/views/__init__.py
|
HieronymusCrouse/infinitystone
|
8a4bd9b415c0b2267eba45efc04a00d891b1a8d8
|
[
"BSD-3-Clause"
] | 32
|
2018-03-22T07:59:29.000Z
|
2019-06-06T13:12:47.000Z
|
infinitystone/ui/views/__init__.py
|
HieronymusCrouse/infinitystone
|
8a4bd9b415c0b2267eba45efc04a00d891b1a8d8
|
[
"BSD-3-Clause"
] | 10
|
2018-02-26T08:17:31.000Z
|
2019-12-27T12:10:00.000Z
|
import infinitystone.ui.views.users
import infinitystone.ui.views.tenants
import infinitystone.ui.views.endpoints
import infinitystone.ui.views.domains
import infinitystone.ui.views.roles
import infinitystone.ui.views.elements
| 32.428571
| 39
| 0.867841
| 30
| 227
| 6.566667
| 0.333333
| 0.57868
| 0.639594
| 0.791878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052863
| 227
| 6
| 40
| 37.833333
| 0.916279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2173e32df857e244d9175d1b85f974fa9ad2be59
| 33
|
py
|
Python
|
nlstruct/recipes/__init__.py
|
percevalw/nlstruct
|
395db91c005536c64eca47a6dab4c5e460a9cfd0
|
[
"MIT"
] | 6
|
2020-02-10T09:02:34.000Z
|
2021-11-22T12:57:23.000Z
|
nlstruct/recipes/__init__.py
|
percevalw/nlstruct
|
395db91c005536c64eca47a6dab4c5e460a9cfd0
|
[
"MIT"
] | null | null | null |
nlstruct/recipes/__init__.py
|
percevalw/nlstruct
|
395db91c005536c64eca47a6dab4c5e460a9cfd0
|
[
"MIT"
] | 4
|
2020-03-04T08:18:39.000Z
|
2022-03-15T12:18:03.000Z
|
from .train_ner import train_ner
| 16.5
| 32
| 0.848485
| 6
| 33
| 4.333333
| 0.666667
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
217e6404f2254f29576772ffe7227b147c385e93
| 9,139
|
py
|
Python
|
platformproject/models.py
|
joe2018/AsiaDataPlatform
|
5ba7d4d3bc24a697d0bda1be10d1fe7ae09f2871
|
[
"MIT"
] | null | null | null |
platformproject/models.py
|
joe2018/AsiaDataPlatform
|
5ba7d4d3bc24a697d0bda1be10d1fe7ae09f2871
|
[
"MIT"
] | null | null | null |
platformproject/models.py
|
joe2018/AsiaDataPlatform
|
5ba7d4d3bc24a697d0bda1be10d1fe7ae09f2871
|
[
"MIT"
] | null | null | null |
from django.db import models
class Menu(models.Model):
keyid = models.IntegerField(verbose_name= '菜单编号')
name = models.CharField(verbose_name= '菜单名',max_length=30,primary_key=True)
mod_name = models.CharField(verbose_name= '所属模块',max_length=30)
power_id = models.IntegerField(verbose_name= '所属权限')
def __str__(self):
return self.name
class Meta:
db_table = 'MENUTABLE'
class user(models.Model):
user_id = models.AutoField(verbose_name= '用户ID',primary_key=True)
user_name = models.CharField(verbose_name= '用户名', max_length=13)
user_hashpas = models.CharField(verbose_name= '密码', max_length=32)
user_key = models.CharField(verbose_name= '识别码', max_length=32)
user_email = models.EmailField(verbose_name= '邮箱')
user_vip = models.IntegerField(verbose_name= '权限等级',default='1')
user_status = models.IntegerField(verbose_name= '账号状态',default='1')
user_reg_time = models.DateTimeField(verbose_name= '注册时间',auto_now_add=True)
class Meta:
db_table = 'USERINFO'
class rof_day_data(models.Model):
id = models.IntegerField(primary_key=True)
channel = models.CharField(verbose_name = '渠道',max_length=300)
dau = models.IntegerField(verbose_name = '老用户数')
loginaccount = models.IntegerField(verbose_name = '活跃')
payrate = models.DecimalField(verbose_name = '付费率',max_digits=8, decimal_places=4)
loginarpu = models.DecimalField(verbose_name = 'arpu',max_digits=8, decimal_places=4)
dayrun = models.DecimalField(verbose_name = '日流水',max_digits=8, decimal_places=4)
payrolenum = models.IntegerField(verbose_name = '付费人数')
payarpu = models.DecimalField(verbose_name = 'arppu',max_digits=8, decimal_places=4)
newaddaccount = models.IntegerField(verbose_name = '新增用户')
dnupay = models.DecimalField(verbose_name = '新玩家付费',max_digits=8, decimal_places=4)
dnupaynum = models.IntegerField(verbose_name = '新玩家付费人数')
dnurate = models.DecimalField(verbose_name = '新玩家付费率',max_digits=8, decimal_places=4)
dnuarppu = models.DecimalField(verbose_name = '新玩家arppu',max_digits=8, decimal_places=4)
dnuarpu = models.DecimalField(verbose_name = '新玩家arpu',max_digits=8, decimal_places=4)
oldpay = models.DecimalField(verbose_name = '老玩家付费',max_digits=8, decimal_places=4)
oldpaynum = models.IntegerField(verbose_name = '老玩家付费人数')
oldrate = models.DecimalField(verbose_name = '老玩家付费率',max_digits=8, decimal_places=4)
oldarppu = models.DecimalField(verbose_name = '老玩家arppu',max_digits=8, decimal_places=4)
oldarpu = models.DecimalField(verbose_name = '老玩家arpu',max_digits=8, decimal_places=4)
operationtime = models.DateTimeField(verbose_name = '日期')
tworemain = models.DecimalField(verbose_name = '2留',max_digits=8, decimal_places=4)
threeremain = models.DecimalField(verbose_name = '3留',max_digits=8, decimal_places=4)
fourremain = models.DecimalField(verbose_name = '4留',max_digits=8, decimal_places=4)
fiveremain = models.DecimalField(verbose_name = '5留',max_digits=8, decimal_places=4)
sixremain = models.DecimalField(verbose_name = '6留',max_digits=8, decimal_places=4)
sevenremain = models.DecimalField(verbose_name = '7留',max_digits=8, decimal_places=4)
fourteenremain = models.DecimalField(verbose_name = '14留',max_digits=8, decimal_places=4)
monthremain = models.DecimalField(verbose_name = '月留',max_digits=8, decimal_places=4)
twoLTV = models.DecimalField(verbose_name = 'LTV2',max_digits=8, decimal_places=4)
threeLTV = models.DecimalField(verbose_name = 'LTV3',max_digits=8, decimal_places=4)
fourLTV = models.DecimalField(verbose_name = 'LTV4',max_digits=8, decimal_places=4)
fiveLTV = models.DecimalField(verbose_name = 'LTV5',max_digits=8, decimal_places=4)
sixLTV = models.DecimalField(verbose_name = 'LTV6',max_digits=8, decimal_places=4)
sevenLTV = models.DecimalField(verbose_name = 'LTV7',max_digits=8, decimal_places=4)
fourteenLTV = models.DecimalField(verbose_name = 'LTV14',max_digits=8, decimal_places=4)
monthLTV = models.DecimalField(verbose_name = 'LTV30',max_digits=8, decimal_places=4)
twomonthLTV = models.DecimalField(verbose_name = 'LTV60',max_digits=8, decimal_places=4)
exchangemoney = models.DecimalField(verbose_name = '外币',max_digits=15, decimal_places=4)
class Meta:
db_table = 'rof_day_data'
ordering = ['operationtime']
class rofid_day_data(models.Model):
id = models.IntegerField(primary_key=True)
channel = models.CharField(verbose_name = '渠道',max_length=300)
dau = models.IntegerField(verbose_name = '老用户数')
loginaccount = models.IntegerField(verbose_name = '活跃')
payrate = models.DecimalField(verbose_name = '付费率',max_digits=8, decimal_places=4)
loginarpu = models.DecimalField(verbose_name = 'arpu',max_digits=8, decimal_places=4)
dayrun = models.DecimalField(verbose_name = '日流水',max_digits=8, decimal_places=4)
payrolenum = models.IntegerField(verbose_name = '付费人数')
payarpu = models.DecimalField(verbose_name = 'arppu',max_digits=8, decimal_places=4)
newaddaccount = models.IntegerField(verbose_name = '新增用户')
dnupay = models.DecimalField(verbose_name = '新玩家付费',max_digits=8, decimal_places=4)
dnupaynum = models.IntegerField(verbose_name = '新玩家付费人数')
dnurate = models.DecimalField(verbose_name = '新玩家付费率',max_digits=8, decimal_places=4)
dnuarppu = models.DecimalField(verbose_name = '新玩家arppu',max_digits=8, decimal_places=4)
dnuarpu = models.DecimalField(verbose_name = '新玩家arpu',max_digits=8, decimal_places=4)
oldpay = models.DecimalField(verbose_name = '老玩家付费',max_digits=8, decimal_places=4)
oldpaynum = models.IntegerField(verbose_name = '老玩家付费人数')
oldrate = models.DecimalField(verbose_name = '老玩家付费率',max_digits=8, decimal_places=4)
oldarppu = models.DecimalField(verbose_name = '老玩家arppu',max_digits=8, decimal_places=4)
oldarpu = models.DecimalField(verbose_name = '老玩家arpu',max_digits=8, decimal_places=4)
operationtime = models.DateTimeField(verbose_name = '日期')
tworemain = models.DecimalField(verbose_name = '2留',max_digits=8, decimal_places=4)
threeremain = models.DecimalField(verbose_name = '3留',max_digits=8, decimal_places=4)
fourremain = models.DecimalField(verbose_name = '4留',max_digits=8, decimal_places=4)
fiveremain = models.DecimalField(verbose_name = '5留',max_digits=8, decimal_places=4)
sixremain = models.DecimalField(verbose_name = '6留',max_digits=8, decimal_places=4)
sevenremain = models.DecimalField(verbose_name = '7留',max_digits=8, decimal_places=4)
fourteenremain = models.DecimalField(verbose_name = '14留',max_digits=8, decimal_places=4)
monthremain = models.DecimalField(verbose_name = '月留',max_digits=8, decimal_places=4)
twoLTV = models.DecimalField(verbose_name = 'LTV2',max_digits=8, decimal_places=4)
threeLTV = models.DecimalField(verbose_name = 'LTV3',max_digits=8, decimal_places=4)
fourLTV = models.DecimalField(verbose_name = 'LTV4',max_digits=8, decimal_places=4)
fiveLTV = models.DecimalField(verbose_name = 'LTV5',max_digits=8, decimal_places=4)
sixLTV = models.DecimalField(verbose_name = 'LTV6',max_digits=8, decimal_places=4)
sevenLTV = models.DecimalField(verbose_name = 'LTV7',max_digits=8, decimal_places=4)
fourteenLTV = models.DecimalField(verbose_name = 'LTV14',max_digits=8, decimal_places=4)
monthLTV = models.DecimalField(verbose_name = 'LTV30',max_digits=8, decimal_places=4)
twomonthLTV = models.DecimalField(verbose_name = 'LTV60',max_digits=8, decimal_places=4)
exchangemoney = models.DecimalField(verbose_name = '外币',max_digits=15, decimal_places=4)
class Meta:
db_table = 'rofid_day_data'
ordering = ['operationtime']
class e3kid_day_data(models.Model):
id = models.IntegerField(primary_key=True)
operationtime = models.DateTimeField(verbose_name='日期')
channel = models.CharField(verbose_name = '渠道',max_length=300)
dau = models.IntegerField(verbose_name = '活跃')
loginaccount = models.IntegerField(verbose_name = '登入次数')
dnu = models.IntegerField(verbose_name='新增用户')
dayrun = models.DecimalField(verbose_name='日流水', max_digits=8, decimal_places=4)
dnupay = models.DecimalField(verbose_name='新玩家付费', max_digits=8, decimal_places=4)
f_pay = models.DecimalField(verbose_name='首冲金额', max_digits=8, decimal_places=4)
payrolenum = models.IntegerField(verbose_name='付费人数')
dnupaynum = models.IntegerField(verbose_name='新玩家付费人数')
f_paynum = models.DecimalField(verbose_name='首冲金额', max_digits=8, decimal_places=4)
paynum = models.IntegerField(verbose_name='充值次数')
dnupaycount = models.IntegerField(verbose_name='新用户充值次数')
arppu = models.DecimalField(verbose_name='arppu', max_digits=8, decimal_places=4)
arpu = models.DecimalField(verbose_name='arpu', max_digits=8, decimal_places=4)
AVEdnupay = models.DecimalField(verbose_name='新用户平均付费', max_digits=8, decimal_places=4)
payrate = models.DecimalField(verbose_name = '付费率',max_digits=8, decimal_places=4)
class Meta:
db_table = 'e3kid_day_data'
ordering = ['operationtime']
| 64.359155
| 93
| 0.750739
| 1,181
| 9,139
| 5.563929
| 0.135478
| 0.175772
| 0.258713
| 0.300107
| 0.872318
| 0.837011
| 0.819662
| 0.817227
| 0.817227
| 0.817227
| 0
| 0.02521
| 0.127585
| 9,139
| 142
| 94
| 64.359155
| 0.798946
| 0
| 0
| 0.728682
| 0
| 0
| 0.05733
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007752
| false
| 0
| 0.007752
| 0.007752
| 0.937985
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
0dff73442ce34d77fdb47c5485c449a43de270bc
| 105
|
py
|
Python
|
il2_rest/__init__.py
|
Abdur-rahmaanJ/interlockledger-rest-client-python
|
844bd283cea6c5f4ca3453f77ce208e692bb5e44
|
[
"BSD-3-Clause"
] | 3
|
2021-03-31T18:47:43.000Z
|
2021-09-23T18:40:09.000Z
|
il2_rest/__init__.py
|
Abdur-rahmaanJ/interlockledger-rest-client-python
|
844bd283cea6c5f4ca3453f77ce208e692bb5e44
|
[
"BSD-3-Clause"
] | 4
|
2021-03-31T22:21:08.000Z
|
2022-03-28T18:54:51.000Z
|
il2_rest/__init__.py
|
Abdur-rahmaanJ/interlockledger-rest-client-python
|
844bd283cea6c5f4ca3453f77ce208e692bb5e44
|
[
"BSD-3-Clause"
] | 1
|
2021-09-27T05:16:16.000Z
|
2021-09-27T05:16:16.000Z
|
import json
from .client import RestNode
from .client import RestNetwork
from .client import RestChain
| 15
| 31
| 0.819048
| 14
| 105
| 6.142857
| 0.5
| 0.348837
| 0.55814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152381
| 105
| 6
| 32
| 17.5
| 0.966292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
df1913bce449880ea8b822e4852bafc04c84f649
| 63,176
|
py
|
Python
|
atom/nucleus/python/nucleus_api/api/utils_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/utils_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/utils_api.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from nucleus_api.api_client import ApiClient
class UtilsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_account_status_using_post(self, account_status_request, **kwargs): # noqa: E501
"""Create an account status # noqa: E501
Create an account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_status_using_post(account_status_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountStatus account_status_request: accountStatusRequest (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_account_status_using_post_with_http_info(account_status_request, **kwargs) # noqa: E501
else:
(data) = self.create_account_status_using_post_with_http_info(account_status_request, **kwargs) # noqa: E501
return data
def create_account_status_using_post_with_http_info(self, account_status_request, **kwargs): # noqa: E501
"""Create an account status # noqa: E501
Create an account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_status_using_post_with_http_info(account_status_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountStatus account_status_request: accountStatusRequest (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_status_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_status_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_status_request' is set
if ('account_status_request' not in params or
params['account_status_request'] is None):
raise ValueError("Missing the required parameter `account_status_request` when calling `create_account_status_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account_status_request' in params:
body_params = params['account_status_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/account_status', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_stage_using_post(self, stage_request, **kwargs): # noqa: E501
"""Create an account stage # noqa: E501
Create a new account stage # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_stage_using_post(stage_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Stage stage_request: stageRequest (required)
:return: Stage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_stage_using_post_with_http_info(stage_request, **kwargs) # noqa: E501
else:
(data) = self.create_stage_using_post_with_http_info(stage_request, **kwargs) # noqa: E501
return data
def create_stage_using_post_with_http_info(self, stage_request, **kwargs): # noqa: E501
"""Create an account stage # noqa: E501
Create a new account stage # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_stage_using_post_with_http_info(stage_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Stage stage_request: stageRequest (required)
:return: Stage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stage_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_stage_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'stage_request' is set
if ('stage_request' not in params or
params['stage_request'] is None):
raise ValueError("Missing the required parameter `stage_request` when calling `create_stage_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'stage_request' in params:
body_params = params['stage_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/stage', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Stage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_transaction_code_using_post(self, transaction_request, **kwargs): # noqa: E501
"""Create a transaction code # noqa: E501
Create a new transaction code for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_transaction_code_using_post(transaction_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TransactionCode transaction_request: transactionRequest (required)
:return: TransactionCode
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_transaction_code_using_post_with_http_info(transaction_request, **kwargs) # noqa: E501
else:
(data) = self.create_transaction_code_using_post_with_http_info(transaction_request, **kwargs) # noqa: E501
return data
def create_transaction_code_using_post_with_http_info(self, transaction_request, **kwargs): # noqa: E501
"""Create a transaction code # noqa: E501
Create a new transaction code for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_transaction_code_using_post_with_http_info(transaction_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param TransactionCode transaction_request: transactionRequest (required)
:return: TransactionCode
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['transaction_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_transaction_code_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'transaction_request' is set
if ('transaction_request' not in params or
params['transaction_request'] is None):
raise ValueError("Missing the required parameter `transaction_request` when calling `create_transaction_code_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'transaction_request' in params:
body_params = params['transaction_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/transaction_code', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TransactionCode', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_account_status_using_delete(self, account_status_id, **kwargs): # noqa: E501
"""Delete an account status # noqa: E501
Permanently delete an account status record from an account’s history. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_status_using_delete(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_account_status_using_delete_with_http_info(account_status_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_status_using_delete_with_http_info(account_status_id, **kwargs) # noqa: E501
return data
def delete_account_status_using_delete_with_http_info(self, account_status_id, **kwargs): # noqa: E501
"""Delete an account status # noqa: E501
Permanently delete an account status record from an account’s history. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_status_using_delete_with_http_info(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_status_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_status_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_status_id' is set
if ('account_status_id' not in params or
params['account_status_id'] is None):
raise ValueError("Missing the required parameter `account_status_id` when calling `delete_account_status_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_status_id' in params:
path_params['account_status_id'] = params['account_status_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/account_status/{account_status_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_stage_using_delete(self, stage_id, **kwargs): # noqa: E501
"""Delete an account stage # noqa: E501
Permanently delete an account stage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_stage_using_delete(stage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stage_id: UUID stage_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_stage_using_delete_with_http_info(stage_id, **kwargs) # noqa: E501
else:
(data) = self.delete_stage_using_delete_with_http_info(stage_id, **kwargs) # noqa: E501
return data
def delete_stage_using_delete_with_http_info(self, stage_id, **kwargs): # noqa: E501
"""Delete an account stage # noqa: E501
Permanently delete an account stage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_stage_using_delete_with_http_info(stage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stage_id: UUID stage_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stage_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_stage_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'stage_id' is set
if ('stage_id' not in params or
params['stage_id'] is None):
raise ValueError("Missing the required parameter `stage_id` when calling `delete_stage_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'stage_id' in params:
path_params['stage_id'] = params['stage_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/stage/{stage_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_transaction_code_using_delete(self, transaction_code_id, **kwargs): # noqa: E501
"""Delete a transaction code # noqa: E501
Permanently delete a transaction code for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_transaction_code_using_delete(transaction_code_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str transaction_code_id: UUID transaction_code_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_transaction_code_using_delete_with_http_info(transaction_code_id, **kwargs) # noqa: E501
else:
(data) = self.delete_transaction_code_using_delete_with_http_info(transaction_code_id, **kwargs) # noqa: E501
return data
def delete_transaction_code_using_delete_with_http_info(self, transaction_code_id, **kwargs): # noqa: E501
"""Delete a transaction code # noqa: E501
Permanently delete a transaction code for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_transaction_code_using_delete_with_http_info(transaction_code_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str transaction_code_id: UUID transaction_code_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['transaction_code_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_transaction_code_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'transaction_code_id' is set
if ('transaction_code_id' not in params or
params['transaction_code_id'] is None):
raise ValueError("Missing the required parameter `transaction_code_id` when calling `delete_transaction_code_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'transaction_code_id' in params:
path_params['transaction_code_id'] = params['transaction_code_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/transaction_code/{transaction_code_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_status_all_using_get(self, **kwargs): # noqa: E501
"""List all account statuses # noqa: E501
Get the account status history information for all accounts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_status_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_account_status_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_account_status_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all account statuses # noqa: E501
Get the account status history information for all accounts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_status_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/account_status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageAccountStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_status_using_get(self, account_status_id, **kwargs): # noqa: E501
"""Retrieve an account status # noqa: E501
Retrieve the information for a specific account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_using_get(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_status_using_get_with_http_info(account_status_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_status_using_get_with_http_info(account_status_id, **kwargs) # noqa: E501
return data
def get_account_status_using_get_with_http_info(self, account_status_id, **kwargs): # noqa: E501
"""Retrieve an account status # noqa: E501
Retrieve the information for a specific account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_using_get_with_http_info(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_status_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_status_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_status_id' is set
if ('account_status_id' not in params or
params['account_status_id'] is None):
raise ValueError("Missing the required parameter `account_status_id` when calling `get_account_status_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_status_id' in params:
path_params['account_status_id'] = params['account_status_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/account_status/{account_status_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_stage_all_using_get(self, **kwargs): # noqa: E501
"""List all account stages # noqa: E501
Get the information for all possible account stages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stage_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageStage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_stage_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_stage_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_stage_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all account stages # noqa: E501
Get the information for all possible account stages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stage_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageStage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_stage_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/stage', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageStage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_stage_using_get(self, stage_id, **kwargs): # noqa: E501
"""Retrieve an account stage # noqa: E501
Retrieve the information for a specific account stage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stage_using_get(stage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stage_id: UUID stage_id (required)
:return: Stage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_stage_using_get_with_http_info(stage_id, **kwargs) # noqa: E501
else:
(data) = self.get_stage_using_get_with_http_info(stage_id, **kwargs) # noqa: E501
return data
def get_stage_using_get_with_http_info(self, stage_id, **kwargs): # noqa: E501
"""Retrieve an account stage # noqa: E501
Retrieve the information for a specific account stage. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stage_using_get_with_http_info(stage_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str stage_id: UUID stage_id (required)
:return: Stage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stage_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_stage_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'stage_id' is set
if ('stage_id' not in params or
params['stage_id'] is None):
raise ValueError("Missing the required parameter `stage_id` when calling `get_stage_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'stage_id' in params:
path_params['stage_id'] = params['stage_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/stage/{stage_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Stage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_transaction_code_all_using_get(self, **kwargs): # noqa: E501
"""List all transaction codes # noqa: E501
Get the information for all transaction codes defined by your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_transaction_code_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageTransactionCode
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_transaction_code_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_transaction_code_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_transaction_code_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all transaction codes # noqa: E501
Get the information for all transaction codes defined by your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_transaction_code_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageTransactionCode
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_transaction_code_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/transaction_code', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageTransactionCode', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_transaction_code_using_get(self, transaction_code_id, **kwargs): # noqa: E501
"""Retrieve a transaction code # noqa: E501
Retrieve the information for a transaction code defined by your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_transaction_code_using_get(transaction_code_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str transaction_code_id: UUID transaction_code_id (required)
:return: TransactionCode
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_transaction_code_using_get_with_http_info(transaction_code_id, **kwargs) # noqa: E501
else:
(data) = self.get_transaction_code_using_get_with_http_info(transaction_code_id, **kwargs) # noqa: E501
return data
def get_transaction_code_using_get_with_http_info(self, transaction_code_id, **kwargs): # noqa: E501
"""Retrieve a transaction code # noqa: E501
Retrieve the information for a transaction code defined by your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_transaction_code_using_get_with_http_info(transaction_code_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str transaction_code_id: UUID transaction_code_id (required)
:return: TransactionCode
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['transaction_code_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_transaction_code_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'transaction_code_id' is set
if ('transaction_code_id' not in params or
params['transaction_code_id'] is None):
raise ValueError("Missing the required parameter `transaction_code_id` when calling `get_transaction_code_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'transaction_code_id' in params:
path_params['transaction_code_id'] = params['transaction_code_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/transaction_code/{transaction_code_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TransactionCode', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_account_status_using_put(self, account_status, account_status_id, **kwargs): # noqa: E501
"""Update an account status # noqa: E501
Update an account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_status_using_put(account_status, account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountStatus account_status: account_status (required)
:param str account_status_id: UUID account_status_id (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_account_status_using_put_with_http_info(account_status, account_status_id, **kwargs) # noqa: E501
else:
(data) = self.update_account_status_using_put_with_http_info(account_status, account_status_id, **kwargs) # noqa: E501
return data
def update_account_status_using_put_with_http_info(self, account_status, account_status_id, **kwargs):  # noqa: E501
    """Update an account status  # noqa: E501

    Update an account status record for an account.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_account_status_using_put_with_http_info(account_status, account_status_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param AccountStatus account_status: account_status (required)
    :param str account_status_id: UUID account_status_id (required)
    :return: AccountStatus
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    all_params = ['account_status', 'account_status_id',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = {'account_status': account_status,
              'account_status_id': account_status_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_account_status_using_put" % key
            )
        params[key] = val

    # Both positional arguments are required and may not be None.
    if params.get('account_status') is None:
        raise ValueError("Missing the required parameter `account_status` when calling `update_account_status_using_put`")  # noqa: E501
    if params.get('account_status_id') is None:
        raise ValueError("Missing the required parameter `account_status_id` when calling `update_account_status_using_put`")  # noqa: E501

    collection_formats = {}
    path_params = {'account_status_id': params['account_status_id']}
    query_params = []
    # Negotiate response and request media types with the client helper.
    header_params = {
        'Accept': self.api_client.select_header_accept(['*/*']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = params['account_status']

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501
    return self.api_client.call_api(
        '/account_status/{account_status_id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='AccountStatus',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_stage_using_put(self, stage, stage_id, **kwargs):  # noqa: E501
    """Update an account stage  # noqa: E501

    Update the information for an account stage.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_stage_using_put(stage, stage_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param Stage stage: stage (required)
    :param str stage_id: UUID stage_id (required)
    :return: Stage
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the low-level call for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand the pending request thread straight back.
        return self.update_stage_using_put_with_http_info(stage, stage_id, **kwargs)  # noqa: E501
    data = self.update_stage_using_put_with_http_info(stage, stage_id, **kwargs)  # noqa: E501
    return data
def update_stage_using_put_with_http_info(self, stage, stage_id, **kwargs):  # noqa: E501
    """Update an account stage  # noqa: E501

    Update the information for an account stage.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_stage_using_put_with_http_info(stage, stage_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param Stage stage: stage (required)
    :param str stage_id: UUID stage_id (required)
    :return: Stage
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    all_params = ['stage', 'stage_id',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = {'stage': stage, 'stage_id': stage_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_stage_using_put" % key
            )
        params[key] = val

    # Both positional arguments are required and may not be None.
    if params.get('stage') is None:
        raise ValueError("Missing the required parameter `stage` when calling `update_stage_using_put`")  # noqa: E501
    if params.get('stage_id') is None:
        raise ValueError("Missing the required parameter `stage_id` when calling `update_stage_using_put`")  # noqa: E501

    collection_formats = {}
    path_params = {'stage_id': params['stage_id']}
    query_params = []
    # Negotiate response and request media types with the client helper.
    header_params = {
        'Accept': self.api_client.select_header_accept(['*/*']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = params['stage']

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501
    return self.api_client.call_api(
        '/stage/{stage_id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Stage',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_transaction_code_using_put(self, transaction_code, transaction_code_id, **kwargs):  # noqa: E501
    """Update a transaction code  # noqa: E501

    Update a transaction code for your firm.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_transaction_code_using_put(transaction_code, transaction_code_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param TransactionCode transaction_code: transaction_code (required)
    :param str transaction_code_id: UUID transaction_code_id (required)
    :return: TransactionCode
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the low-level call for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand the pending request thread straight back.
        return self.update_transaction_code_using_put_with_http_info(transaction_code, transaction_code_id, **kwargs)  # noqa: E501
    data = self.update_transaction_code_using_put_with_http_info(transaction_code, transaction_code_id, **kwargs)  # noqa: E501
    return data
def update_transaction_code_using_put_with_http_info(self, transaction_code, transaction_code_id, **kwargs):  # noqa: E501
    """Update a transaction code  # noqa: E501

    Update a transaction code for your firm.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_transaction_code_using_put_with_http_info(transaction_code, transaction_code_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param TransactionCode transaction_code: transaction_code (required)
    :param str transaction_code_id: UUID transaction_code_id (required)
    :return: TransactionCode
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands.
    all_params = ['transaction_code', 'transaction_code_id',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    params = {'transaction_code': transaction_code,
              'transaction_code_id': transaction_code_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_transaction_code_using_put" % key
            )
        params[key] = val

    # Both positional arguments are required and may not be None.
    if params.get('transaction_code') is None:
        raise ValueError("Missing the required parameter `transaction_code` when calling `update_transaction_code_using_put`")  # noqa: E501
    if params.get('transaction_code_id') is None:
        raise ValueError("Missing the required parameter `transaction_code_id` when calling `update_transaction_code_using_put`")  # noqa: E501

    collection_formats = {}
    path_params = {'transaction_code_id': params['transaction_code_id']}
    query_params = []
    # Negotiate response and request media types with the client helper.
    header_params = {
        'Accept': self.api_client.select_header_accept(['*/*']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = params['transaction_code']

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501
    return self.api_client.call_api(
        '/transaction_code/{transaction_code_id}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TransactionCode',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 40.917098
| 150
| 0.623987
| 7,336
| 63,176
| 5.072383
| 0.025354
| 0.049878
| 0.022574
| 0.029024
| 0.983688
| 0.974577
| 0.963102
| 0.954422
| 0.943565
| 0.928731
| 0
| 0.016051
| 0.289968
| 63,176
| 1,543
| 151
| 40.943616
| 0.813496
| 0.32802
| 0
| 0.805054
| 1
| 0
| 0.195844
| 0.059045
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037304
| false
| 0
| 0.004813
| 0
| 0.097473
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
df53ad4fcbf69c754e2d3eab5b9383a90b5ca20f
| 44,216
|
py
|
Python
|
char_numbers.py
|
nahog/pico-tetris
|
75e3766fd14b660904c79132acb9bedd30e8c4ed
|
[
"0BSD"
] | 5
|
2021-02-17T22:57:40.000Z
|
2021-05-29T21:15:52.000Z
|
char_numbers.py
|
nahog/pico-tetris
|
75e3766fd14b660904c79132acb9bedd30e8c4ed
|
[
"0BSD"
] | null | null | null |
char_numbers.py
|
nahog/pico-tetris
|
75e3766fd14b660904c79132acb9bedd30e8c4ed
|
[
"0BSD"
] | null | null | null |
class Numbers:
def __init__(self, display, fg_color, bg_color=None):
self._display = display
self.fg_color = fg_color
self.bg_color = bg_color
self._all_height = 11
self._all_width = 19
def draw(self, number, x, y):
str_number = str(number)
clear_height = 0
for i in str_number:
clear_height += self._all_height
if self.bg_color != None:
self.bg_color.enable_color()
self._display.rectangle(x, y-clear_height+1, self._all_width, clear_height)
self.fg_color.enable_color()
next_y = y
for i in str_number:
next_y = self._draw_number(i, x, next_y)
def _draw_number(self, number, x, y):
if number == "1":
# Line 1
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
# Line 2
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
# Line 3
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-4)
self._display.pixel(x+2, y-5)
# Line 4
self._display.pixel(x+3, y-3)
self._display.pixel(x+3, y-4)
self._display.pixel(x+3, y-5)
# Line 5
self._display.pixel(x+4, y-3)
self._display.pixel(x+4, y-4)
self._display.pixel(x+4, y-5)
# Line 6
self._display.pixel(x+5, y-3)
self._display.pixel(x+5, y-4)
self._display.pixel(x+5, y-5)
# Line 7
self._display.pixel(x+6, y-3)
self._display.pixel(x+6, y-4)
self._display.pixel(x+6, y-5)
# Line 8
self._display.pixel(x+7, y-3)
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
# Line 9
self._display.pixel(x+8, y-3)
self._display.pixel(x+8, y-4)
self._display.pixel(x+8, y-5)
# Line 10
self._display.pixel(x+9, y-3)
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
# Line 11
self._display.pixel(x+10, y-3)
self._display.pixel(x+10, y-4)
self._display.pixel(x+10, y-5)
# Line 12
self._display.pixel(x+11, y-3)
self._display.pixel(x+11, y-4)
self._display.pixel(x+11, y-5)
# Line 13
self._display.pixel(x+12, y-3)
self._display.pixel(x+12, y-4)
self._display.pixel(x+12, y-5)
# Line 14
self._display.pixel(x+13, y-3)
self._display.pixel(x+13, y-4)
self._display.pixel(x+13, y-5)
# Line 15
self._display.pixel(x+14, y-1)
self._display.pixel(x+14, y-2)
self._display.pixel(x+14, y-3)
self._display.pixel(x+14, y-4)
self._display.pixel(x+14, y-5)
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
# Line 16
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
# Line 17
self._display.pixel(x+16, y-1)
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
self._display.pixel(x+16, y-7)
return y-11
elif number == "2":
# Line 1
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
# Line 2
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y)
self._display.pixel(x+3, y-1)
self._display.pixel(x+3, y-2)
self._display.pixel(x+3, y-6)
self._display.pixel(x+3, y-7)
self._display.pixel(x+3, y-8)
# Line 5
self._display.pixel(x+4, y-6)
self._display.pixel(x+4, y-7)
self._display.pixel(x+4, y-8)
# Line 6
self._display.pixel(x+5, y-6)
self._display.pixel(x+5, y-7)
self._display.pixel(x+5, y-8)
# Line 7
self._display.pixel(x+6, y-6)
self._display.pixel(x+6, y-7)
self._display.pixel(x+6, y-8)
# Line 8
self._display.pixel(x+7, y-6)
self._display.pixel(x+7, y-7)
self._display.pixel(x+7, y-8)
# Line 9
self._display.pixel(x+8, y-5)
self._display.pixel(x+8, y-6)
self._display.pixel(x+8, y-7)
self._display.pixel(x+8, y-8)
# Line 10
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
# Line 11
self._display.pixel(x+10, y-3)
self._display.pixel(x+10, y-4)
self._display.pixel(x+10, y-5)
self._display.pixel(x+10, y-6)
# Line 12
self._display.pixel(x+11, y-2)
self._display.pixel(x+11, y-3)
self._display.pixel(x+11, y-4)
self._display.pixel(x+11, y-5)
# Line 13
self._display.pixel(x+12, y-1)
self._display.pixel(x+12, y-2)
self._display.pixel(x+12, y-3)
self._display.pixel(x+12, y-4)
# Line 14
self._display.pixel(x+13, y)
self._display.pixel(x+13, y-1)
self._display.pixel(x+13, y-2)
self._display.pixel(x+13, y-3)
# Line 15
self._display.pixel(x+14, y)
self._display.pixel(x+14, y-1)
self._display.pixel(x+14, y-2)
self._display.pixel(x+14, y-3)
self._display.pixel(x+14, y-4)
self._display.pixel(x+14, y-5)
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
self._display.pixel(x+14, y-8)
# Line 16
self._display.pixel(x+15, y)
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
self._display.pixel(x+15, y-8)
# Line 17
self._display.pixel(x+16, y)
self._display.pixel(x+16, y-1)
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
self._display.pixel(x+16, y-7)
self._display.pixel(x+16, y-8)
return y-11
elif number == "3":
# Line 1
self._display.pixel(x, y)
self._display.pixel(x, y-1)
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
# Line 2
self._display.pixel(x+1, y)
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y-5)
self._display.pixel(x+3, y-6)
self._display.pixel(x+3, y-7)
self._display.pixel(x+3, y-8)
# Line 5
self._display.pixel(x+4, y-6)
self._display.pixel(x+4, y-7)
self._display.pixel(x+4, y-8)
# Line 6
self._display.pixel(x+5, y-6)
self._display.pixel(x+5, y-7)
self._display.pixel(x+5, y-8)
# Line 7
self._display.pixel(x+6, y-5)
self._display.pixel(x+6, y-6)
self._display.pixel(x+6, y-7)
self._display.pixel(x+6, y-8)
# Line 8
self._display.pixel(x+7, y-1)
self._display.pixel(x+7, y-2)
self._display.pixel(x+7, y-3)
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
self._display.pixel(x+7, y-6)
self._display.pixel(x+7, y-7)
# Line 9
self._display.pixel(x+8, y-1)
self._display.pixel(x+8, y-2)
self._display.pixel(x+8, y-3)
self._display.pixel(x+8, y-4)
self._display.pixel(x+8, y-5)
self._display.pixel(x+8, y-6)
# Line 10
self._display.pixel(x+9, y-1)
self._display.pixel(x+9, y-2)
self._display.pixel(x+9, y-3)
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
# Line 11
self._display.pixel(x+10, y-5)
self._display.pixel(x+10, y-6)
self._display.pixel(x+10, y-7)
self._display.pixel(x+10, y-8)
# Line 12
self._display.pixel(x+11, y-6)
self._display.pixel(x+11, y-7)
self._display.pixel(x+11, y-8)
# Line 13
self._display.pixel(x+12, y-6)
self._display.pixel(x+12, y-7)
self._display.pixel(x+12, y-8)
# Line 14
self._display.pixel(x+13, y-5)
self._display.pixel(x+13, y-6)
self._display.pixel(x+13, y-7)
self._display.pixel(x+13, y-8)
# Line 15
self._display.pixel(x+14, y)
self._display.pixel(x+14, y-1)
self._display.pixel(x+14, y-2)
self._display.pixel(x+14, y-3)
self._display.pixel(x+14, y-4)
self._display.pixel(x+14, y-5)
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
self._display.pixel(x+14, y-8)
# Line 16
self._display.pixel(x+15, y)
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
# Line 17
self._display.pixel(x+16, y)
self._display.pixel(x+16, y-1)
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
return y-11
elif number == "4":
# Line 1
self._display.pixel(x, y)
self._display.pixel(x, y-1)
self._display.pixel(x, y-2)
self._display.pixel(x, y-6)
self._display.pixel(x, y-7)
self._display.pixel(x, y-8)
# Line 2
self._display.pixel(x+1, y)
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
self._display.pixel(x+1, y-8)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y)
self._display.pixel(x+3, y-1)
self._display.pixel(x+3, y-2)
self._display.pixel(x+3, y-6)
self._display.pixel(x+3, y-7)
self._display.pixel(x+3, y-8)
# Line 5
self._display.pixel(x+4, y)
self._display.pixel(x+4, y-1)
self._display.pixel(x+4, y-2)
self._display.pixel(x+4, y-6)
self._display.pixel(x+4, y-7)
self._display.pixel(x+4, y-8)
# Line 6
self._display.pixel(x+5, y)
self._display.pixel(x+5, y-1)
self._display.pixel(x+5, y-2)
self._display.pixel(x+5, y-3)
self._display.pixel(x+5, y-4)
self._display.pixel(x+5, y-5)
self._display.pixel(x+5, y-6)
self._display.pixel(x+5, y-7)
self._display.pixel(x+5, y-8)
# Line 7
self._display.pixel(x+6, y)
self._display.pixel(x+6, y-1)
self._display.pixel(x+6, y-2)
self._display.pixel(x+6, y-3)
self._display.pixel(x+6, y-4)
self._display.pixel(x+6, y-5)
self._display.pixel(x+6, y-6)
self._display.pixel(x+6, y-7)
self._display.pixel(x+6, y-8)
# Line 8
self._display.pixel(x+7, y)
self._display.pixel(x+7, y-1)
self._display.pixel(x+7, y-2)
self._display.pixel(x+7, y-3)
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
self._display.pixel(x+7, y-6)
self._display.pixel(x+7, y-7)
self._display.pixel(x+7, y-8)
# Line 9
self._display.pixel(x+8, y-6)
self._display.pixel(x+8, y-7)
self._display.pixel(x+8, y-8)
# Line 10
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
self._display.pixel(x+9, y-8)
# Line 11
self._display.pixel(x+10, y-6)
self._display.pixel(x+10, y-7)
self._display.pixel(x+10, y-8)
# Line 12
self._display.pixel(x+11, y-6)
self._display.pixel(x+11, y-7)
self._display.pixel(x+11, y-8)
# Line 13
self._display.pixel(x+12, y-6)
self._display.pixel(x+12, y-7)
self._display.pixel(x+12, y-8)
# Line 14
self._display.pixel(x+13, y-6)
self._display.pixel(x+13, y-7)
self._display.pixel(x+13, y-8)
# Line 15
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
self._display.pixel(x+14, y-8)
# Line 16
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
self._display.pixel(x+15, y-8)
# Line 17
self._display.pixel(x+16, y-6)
self._display.pixel(x+16, y-7)
self._display.pixel(x+16, y-8)
return y-11
elif number == "5":
# Line 1
self._display.pixel(x, y)
self._display.pixel(x, y-1)
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
self._display.pixel(x, y-7)
self._display.pixel(x, y-8)
# Line 2
self._display.pixel(x+1, y)
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
self._display.pixel(x+1, y-8)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-4)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y)
self._display.pixel(x+3, y-1)
self._display.pixel(x+3, y-2)
# Line 5
self._display.pixel(x+4, y)
self._display.pixel(x+4, y-1)
self._display.pixel(x+4, y-2)
# Line 6
self._display.pixel(x+5, y)
self._display.pixel(x+5, y-1)
self._display.pixel(x+5, y-2)
# Line 7
self._display.pixel(x+6, y)
self._display.pixel(x+6, y-1)
self._display.pixel(x+6, y-2)
# Line 8
self._display.pixel(x+7, y)
self._display.pixel(x+7, y-1)
self._display.pixel(x+7, y-2)
self._display.pixel(x+7, y-3)
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
self._display.pixel(x+7, y-6)
# Line 9
self._display.pixel(x+8, y-1)
self._display.pixel(x+8, y-2)
self._display.pixel(x+8, y-3)
self._display.pixel(x+8, y-4)
self._display.pixel(x+8, y-5)
self._display.pixel(x+8, y-6)
# Line 10
self._display.pixel(x+9, y-1)
self._display.pixel(x+9, y-2)
self._display.pixel(x+9, y-3)
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
# Line 11
self._display.pixel(x+10, y-5)
self._display.pixel(x+10, y-6)
self._display.pixel(x+10, y-7)
self._display.pixel(x+10, y-8)
# Line 12
self._display.pixel(x+11, y-6)
self._display.pixel(x+11, y-7)
self._display.pixel(x+11, y-8)
# Line 13
self._display.pixel(x+12, y-6)
self._display.pixel(x+12, y-7)
self._display.pixel(x+12, y-8)
# Line 14
self._display.pixel(x+13, y-5)
self._display.pixel(x+13, y-6)
self._display.pixel(x+13, y-7)
self._display.pixel(x+13, y-8)
# Line 15
self._display.pixel(x+15, y)
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
self._display.pixel(x+15, y-8)
# Line 16
self._display.pixel(x+15, y)
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
# Line 17
self._display.pixel(x+16, y)
self._display.pixel(x+16, y-1)
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
return y-11
elif number == "6":
# Line 1
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
self._display.pixel(x, y-7)
# Line 2
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
self._display.pixel(x+1, y-8)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-4)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y)
self._display.pixel(x+3, y-1)
self._display.pixel(x+3, y-2)
self._display.pixel(x+3, y-3)
# Line 5
self._display.pixel(x+4, y)
self._display.pixel(x+4, y-1)
self._display.pixel(x+4, y-2)
# Line 6
self._display.pixel(x+5, y)
self._display.pixel(x+5, y-1)
self._display.pixel(x+5, y-2)
# Line 7
self._display.pixel(x+6, y)
self._display.pixel(x+6, y-1)
self._display.pixel(x+6, y-2)
# Line 8
self._display.pixel(x+7, y)
self._display.pixel(x+7, y-1)
self._display.pixel(x+7, y-2)
self._display.pixel(x+7, y-3)
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
# Line 9
self._display.pixel(x+8, y)
self._display.pixel(x+8, y-1)
self._display.pixel(x+8, y-2)
self._display.pixel(x+8, y-3)
self._display.pixel(x+8, y-4)
self._display.pixel(x+8, y-5)
self._display.pixel(x+8, y-6)
self._display.pixel(x+8, y-7)
# Line 10
self._display.pixel(x+9, y)
self._display.pixel(x+9, y-1)
self._display.pixel(x+9, y-2)
self._display.pixel(x+9, y-3)
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
self._display.pixel(x+9, y-8)
# Line 11
self._display.pixel(x+10, y)
self._display.pixel(x+10, y-1)
self._display.pixel(x+10, y-2)
self._display.pixel(x+10, y-3)
self._display.pixel(x+10, y-5)
self._display.pixel(x+10, y-6)
self._display.pixel(x+10, y-7)
self._display.pixel(x+10, y-8)
# Line 12
self._display.pixel(x+11, y)
self._display.pixel(x+11, y-1)
self._display.pixel(x+11, y-2)
self._display.pixel(x+11, y-6)
self._display.pixel(x+11, y-7)
self._display.pixel(x+11, y-8)
# Line 13
self._display.pixel(x+12, y)
self._display.pixel(x+12, y-1)
self._display.pixel(x+12, y-2)
self._display.pixel(x+12, y-6)
self._display.pixel(x+12, y-7)
self._display.pixel(x+12, y-8)
# Line 14
self._display.pixel(x+13, y)
self._display.pixel(x+13, y-1)
self._display.pixel(x+13, y-2)
self._display.pixel(x+13, y-6)
self._display.pixel(x+13, y-7)
self._display.pixel(x+13, y-8)
# Line 15
self._display.pixel(x+14, y)
self._display.pixel(x+14, y-1)
self._display.pixel(x+14, y-2)
self._display.pixel(x+14, y-3)
self._display.pixel(x+14, y-5)
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
self._display.pixel(x+14, y-8)
# Line 16
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
# Line 17
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
return y-11
elif number == "7":
# Line 1
self._display.pixel(x, y)
self._display.pixel(x, y-1)
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
self._display.pixel(x, y-7)
self._display.pixel(x, y-8)
# Line 2
self._display.pixel(x+1, y)
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
self._display.pixel(x+1, y-8)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-4)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y-6)
self._display.pixel(x+3, y-7)
self._display.pixel(x+3, y-8)
# Line 5
self._display.pixel(x+4, y-6)
self._display.pixel(x+4, y-7)
self._display.pixel(x+4, y-8)
# Line 6
self._display.pixel(x+5, y-6)
self._display.pixel(x+5, y-7)
self._display.pixel(x+5, y-8)
# Line 7
self._display.pixel(x+6, y-5)
self._display.pixel(x+6, y-6)
self._display.pixel(x+6, y-7)
self._display.pixel(x+6, y-8)
# Line 8
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
self._display.pixel(x+7, y-6)
self._display.pixel(x+7, y-7)
# Line 9
self._display.pixel(x+8, y-3)
self._display.pixel(x+8, y-4)
self._display.pixel(x+8, y-5)
self._display.pixel(x+8, y-6)
# Line 10
self._display.pixel(x+9, y-2)
self._display.pixel(x+9, y-3)
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
# Line 11
self._display.pixel(x+10, y-2)
self._display.pixel(x+10, y-3)
self._display.pixel(x+10, y-4)
# Line 12
self._display.pixel(x+11, y-1)
self._display.pixel(x+11, y-2)
self._display.pixel(x+11, y-3)
# Line 13
self._display.pixel(x+12, y-1)
self._display.pixel(x+12, y-2)
self._display.pixel(x+12, y-3)
# Line 14
self._display.pixel(x+13, y)
self._display.pixel(x+13, y-1)
self._display.pixel(x+13, y-2)
self._display.pixel(x+13, y-3)
# Line 15
self._display.pixel(x+15, y)
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
# Line 16
self._display.pixel(x+15, y)
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
# Line 17
self._display.pixel(x+16, y)
self._display.pixel(x+16, y-1)
self._display.pixel(x+16, y-2)
return y-11
elif number == "8":
# Line 1
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
# Line 2
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y)
self._display.pixel(x+3, y-1)
self._display.pixel(x+3, y-2)
self._display.pixel(x+3, y-6)
self._display.pixel(x+3, y-7)
self._display.pixel(x+3, y-8)
# Line 5
self._display.pixel(x+4, y)
self._display.pixel(x+4, y-1)
self._display.pixel(x+4, y-2)
self._display.pixel(x+4, y-6)
self._display.pixel(x+4, y-7)
self._display.pixel(x+4, y-8)
# Line 6
self._display.pixel(x+5, y)
self._display.pixel(x+5, y-1)
self._display.pixel(x+5, y-2)
self._display.pixel(x+5, y-6)
self._display.pixel(x+5, y-7)
self._display.pixel(x+5, y-8)
# Line 7
self._display.pixel(x+6, y)
self._display.pixel(x+6, y-1)
self._display.pixel(x+6, y-2)
self._display.pixel(x+6, y-3)
self._display.pixel(x+6, y-5)
self._display.pixel(x+6, y-6)
self._display.pixel(x+6, y-7)
self._display.pixel(x+6, y-8)
# Line 8
self._display.pixel(x+7, y-1)
self._display.pixel(x+7, y-2)
self._display.pixel(x+7, y-3)
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
self._display.pixel(x+7, y-6)
self._display.pixel(x+7, y-7)
# Line 9
self._display.pixel(x+8, y-2)
self._display.pixel(x+8, y-3)
self._display.pixel(x+8, y-4)
self._display.pixel(x+8, y-5)
self._display.pixel(x+8, y-6)
# Line 10
self._display.pixel(x+9, y-1)
self._display.pixel(x+9, y-2)
self._display.pixel(x+9, y-3)
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
# Line 11
self._display.pixel(x+10, y)
self._display.pixel(x+10, y-1)
self._display.pixel(x+10, y-2)
self._display.pixel(x+10, y-3)
self._display.pixel(x+10, y-5)
self._display.pixel(x+10, y-6)
self._display.pixel(x+10, y-7)
self._display.pixel(x+10, y-8)
# Line 12
self._display.pixel(x+11, y)
self._display.pixel(x+11, y-1)
self._display.pixel(x+11, y-2)
self._display.pixel(x+11, y-6)
self._display.pixel(x+11, y-7)
self._display.pixel(x+11, y-8)
# Line 13
self._display.pixel(x+12, y)
self._display.pixel(x+12, y-1)
self._display.pixel(x+12, y-2)
self._display.pixel(x+12, y-6)
self._display.pixel(x+12, y-7)
self._display.pixel(x+12, y-8)
# Line 14
self._display.pixel(x+13, y)
self._display.pixel(x+13, y-1)
self._display.pixel(x+13, y-2)
self._display.pixel(x+13, y-6)
self._display.pixel(x+13, y-7)
self._display.pixel(x+13, y-8)
# Line 15
self._display.pixel(x+14, y)
self._display.pixel(x+14, y-1)
self._display.pixel(x+14, y-2)
self._display.pixel(x+14, y-3)
self._display.pixel(x+14, y-5)
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
self._display.pixel(x+14, y-8)
# Line 16
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
# Line 17
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
return y-11
elif number == "9":
# Line 1
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
# Line 2
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y)
self._display.pixel(x+3, y-1)
self._display.pixel(x+3, y-2)
self._display.pixel(x+3, y-6)
self._display.pixel(x+3, y-7)
self._display.pixel(x+3, y-8)
# Line 5
self._display.pixel(x+4, y)
self._display.pixel(x+4, y-1)
self._display.pixel(x+4, y-2)
self._display.pixel(x+4, y-6)
self._display.pixel(x+4, y-7)
self._display.pixel(x+4, y-8)
# Line 6
self._display.pixel(x+5, y)
self._display.pixel(x+5, y-1)
self._display.pixel(x+5, y-2)
self._display.pixel(x+5, y-6)
self._display.pixel(x+5, y-7)
self._display.pixel(x+5, y-8)
# Line 7
self._display.pixel(x+6, y)
self._display.pixel(x+6, y-1)
self._display.pixel(x+6, y-2)
self._display.pixel(x+6, y-3)
self._display.pixel(x+6, y-5)
self._display.pixel(x+6, y-6)
self._display.pixel(x+6, y-7)
self._display.pixel(x+6, y-8)
# Line 8
self._display.pixel(x+7, y)
self._display.pixel(x+7, y-1)
self._display.pixel(x+7, y-2)
self._display.pixel(x+7, y-3)
self._display.pixel(x+7, y-4)
self._display.pixel(x+7, y-5)
self._display.pixel(x+7, y-6)
self._display.pixel(x+7, y-7)
self._display.pixel(x+7, y-8)
# Line 9
self._display.pixel(x+8, y-1)
self._display.pixel(x+8, y-2)
self._display.pixel(x+8, y-3)
self._display.pixel(x+8, y-4)
self._display.pixel(x+8, y-5)
self._display.pixel(x+8, y-6)
self._display.pixel(x+8, y-7)
self._display.pixel(x+8, y-8)
# Line 10
self._display.pixel(x+9, y-3)
self._display.pixel(x+9, y-4)
self._display.pixel(x+9, y-5)
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
self._display.pixel(x+9, y-8)
# Line 11
self._display.pixel(x+10, y-6)
self._display.pixel(x+10, y-7)
self._display.pixel(x+10, y-8)
# Line 12
self._display.pixel(x+11, y-6)
self._display.pixel(x+11, y-7)
self._display.pixel(x+11, y-8)
# Line 13
self._display.pixel(x+12, y-6)
self._display.pixel(x+12, y-7)
self._display.pixel(x+12, y-8)
# Line 14
self._display.pixel(x+13, y-5)
self._display.pixel(x+13, y-6)
self._display.pixel(x+13, y-7)
self._display.pixel(x+13, y-8)
# Line 15
self._display.pixel(x+14, y)
self._display.pixel(x+14, y-1)
self._display.pixel(x+14, y-2)
self._display.pixel(x+14, y-3)
self._display.pixel(x+14, y-4)
self._display.pixel(x+14, y-5)
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
self._display.pixel(x+14, y-8)
# Line 16
self._display.pixel(x+15, y)
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
# Line 17
self._display.pixel(x+16, y-1)
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
return y-11
elif number == "0":
# Line 1
self._display.pixel(x, y-2)
self._display.pixel(x, y-3)
self._display.pixel(x, y-4)
self._display.pixel(x, y-5)
self._display.pixel(x, y-6)
# Line 2
self._display.pixel(x+1, y-1)
self._display.pixel(x+1, y-2)
self._display.pixel(x+1, y-3)
self._display.pixel(x+1, y-4)
self._display.pixel(x+1, y-5)
self._display.pixel(x+1, y-6)
self._display.pixel(x+1, y-7)
# Line 3
self._display.pixel(x+2, y)
self._display.pixel(x+2, y-1)
self._display.pixel(x+2, y-2)
self._display.pixel(x+2, y-3)
self._display.pixel(x+2, y-5)
self._display.pixel(x+2, y-6)
self._display.pixel(x+2, y-7)
self._display.pixel(x+2, y-8)
# Line 4
self._display.pixel(x+3, y)
self._display.pixel(x+3, y-1)
self._display.pixel(x+3, y-2)
self._display.pixel(x+3, y-6)
self._display.pixel(x+3, y-7)
self._display.pixel(x+3, y-8)
# Line 5
self._display.pixel(x+4, y)
self._display.pixel(x+4, y-1)
self._display.pixel(x+4, y-2)
self._display.pixel(x+4, y-6)
self._display.pixel(x+4, y-7)
self._display.pixel(x+4, y-8)
# Line 6
self._display.pixel(x+5, y)
self._display.pixel(x+5, y-1)
self._display.pixel(x+5, y-2)
self._display.pixel(x+5, y-6)
self._display.pixel(x+5, y-7)
self._display.pixel(x+5, y-8)
# Line 7
self._display.pixel(x+6, y)
self._display.pixel(x+6, y-1)
self._display.pixel(x+6, y-2)
self._display.pixel(x+6, y-6)
self._display.pixel(x+6, y-7)
self._display.pixel(x+6, y-8)
# Line 8
self._display.pixel(x+7, y)
self._display.pixel(x+7, y-1)
self._display.pixel(x+7, y-2)
self._display.pixel(x+7, y-6)
self._display.pixel(x+7, y-7)
self._display.pixel(x+7, y-8)
# Line 9
self._display.pixel(x+8, y)
self._display.pixel(x+8, y-1)
self._display.pixel(x+8, y-2)
self._display.pixel(x+8, y-6)
self._display.pixel(x+8, y-7)
self._display.pixel(x+8, y-8)
# Line 10
self._display.pixel(x+9, y)
self._display.pixel(x+9, y-1)
self._display.pixel(x+9, y-2)
self._display.pixel(x+9, y-6)
self._display.pixel(x+9, y-7)
self._display.pixel(x+9, y-8)
# Line 11
self._display.pixel(x+10, y)
self._display.pixel(x+10, y-1)
self._display.pixel(x+10, y-2)
self._display.pixel(x+10, y-6)
self._display.pixel(x+10, y-7)
self._display.pixel(x+10, y-8)
# Line 12
self._display.pixel(x+11, y)
self._display.pixel(x+11, y-1)
self._display.pixel(x+11, y-2)
self._display.pixel(x+11, y-6)
self._display.pixel(x+11, y-7)
self._display.pixel(x+11, y-8)
# Line 13
self._display.pixel(x+12, y)
self._display.pixel(x+12, y-1)
self._display.pixel(x+12, y-2)
self._display.pixel(x+12, y-6)
self._display.pixel(x+12, y-7)
self._display.pixel(x+12, y-8)
# Line 14
self._display.pixel(x+13, y)
self._display.pixel(x+13, y-1)
self._display.pixel(x+13, y-2)
self._display.pixel(x+13, y-6)
self._display.pixel(x+13, y-7)
self._display.pixel(x+13, y-8)
# Line 15
self._display.pixel(x+14, y)
self._display.pixel(x+14, y-1)
self._display.pixel(x+14, y-2)
self._display.pixel(x+14, y-3)
self._display.pixel(x+14, y-5)
self._display.pixel(x+14, y-6)
self._display.pixel(x+14, y-7)
self._display.pixel(x+14, y-8)
# Line 16
self._display.pixel(x+15, y-1)
self._display.pixel(x+15, y-2)
self._display.pixel(x+15, y-3)
self._display.pixel(x+15, y-4)
self._display.pixel(x+15, y-5)
self._display.pixel(x+15, y-6)
self._display.pixel(x+15, y-7)
# Line 17
self._display.pixel(x+16, y-2)
self._display.pixel(x+16, y-3)
self._display.pixel(x+16, y-4)
self._display.pixel(x+16, y-5)
self._display.pixel(x+16, y-6)
return y-11
else:
return y-3
| 38.482158
| 87
| 0.484123
| 6,900
| 44,216
| 2.961449
| 0.007246
| 0.503866
| 0.730547
| 0.776206
| 0.979886
| 0.976559
| 0.971665
| 0.970246
| 0.963394
| 0.963394
| 0
| 0.084641
| 0.364597
| 44,216
| 1,148
| 88
| 38.515679
| 0.642677
| 0.0287
| 0
| 0.963077
| 0
| 0
| 0.000234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003077
| false
| 0
| 0
| 0
| 0.015385
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
df6ddebf324743fc28ab00e88e930e0d3ef5ed90
| 5,087
|
py
|
Python
|
src/kernel/quantum_utils.py
|
alexk101/SeQUeNCe
|
3ae6a9c0f787e65b905fd28de29303af0c9420c1
|
[
"BSD-3-Clause"
] | 35
|
2020-09-11T20:06:17.000Z
|
2022-03-09T04:01:17.000Z
|
src/kernel/quantum_utils.py
|
alexk101/SeQUeNCe
|
3ae6a9c0f787e65b905fd28de29303af0c9420c1
|
[
"BSD-3-Clause"
] | 62
|
2020-09-03T16:49:06.000Z
|
2022-03-25T16:08:48.000Z
|
src/kernel/quantum_utils.py
|
alexk101/SeQUeNCe
|
3ae6a9c0f787e65b905fd28de29303af0c9420c1
|
[
"BSD-3-Clause"
] | 15
|
2020-09-11T20:06:26.000Z
|
2022-01-03T14:31:31.000Z
|
"""This module defines functions to handle cached measurement of quantum states.
These should not be used directly, but accessed by a QuantumManager instance.
"""
from functools import lru_cache
from typing import Tuple
from math import sqrt
from numpy import array, kron, identity, zeros, trace
@lru_cache(maxsize=1000)
def measure_state_with_cache_ket(state: Tuple[complex, complex]) -> float:
    """Return the probability of measuring |0> for a single-qubit ket.

    Cached on the hashable tuple form of the state vector.
    """
    ket = array(state)
    # projector onto the |0> basis state
    measure_zero = array([[1, 0], [0, 0]], dtype=complex)
    # Born rule: <psi| M0 |psi>; keep only the (physically meaningful) real part
    return (ket.conj() @ measure_zero @ ket).real
@lru_cache(maxsize=1000)
def measure_entangled_state_with_cache_ket(state: Tuple[complex], state_index: int, num_states: int) -> Tuple[
        Tuple[complex], Tuple[complex], float]:
    """Measure qubit ``state_index`` of an entangled ket over ``num_states`` qubits.

    Returns a 3-tuple: (post-measurement state for outcome 0, post-measurement
    state for outcome 1, probability of outcome 0).  A post-measurement state
    is ``None`` when its outcome has (numerically) zero probability.
    Results are cached on the hashable tuple arguments.
    """
    state = array(state)
    # generate projectors
    # Build P0 (|0><0| at the measured qubit, identity elsewhere) and P1
    # (|1><1| likewise) by chaining Kronecker products over the qubits.
    # NOTE(review): kron mixes the 1-D [1, 0] factor with 2-D identity(2)
    # factors, so the projector shapes depend on numpy's kron dimension
    # promotion -- confirm the intended shapes for num_states > 1.
    projector0 = [1]
    projector1 = [1]
    for i in range(num_states):
        if i == state_index:
            projector0 = kron(projector0, [1, 0])
            projector1 = kron(projector1, [0, 1])
        else:
            projector0 = kron(projector0, identity(2))
            projector1 = kron(projector1, identity(2))
    # probability of measuring basis[0]
    prob_0 = (state.conj().T @ projector0.T @ projector0 @ state).real
    if prob_0 >= 1:
        state1 = None  # outcome 1 is impossible; no collapsed state to return
    else:
        # collapse onto outcome 1 and renormalize
        state1 = (projector1 @ state) / sqrt(1 - prob_0)
    if prob_0 <= 0:
        state0 = None  # outcome 0 is impossible
    else:
        # collapse onto outcome 0 and renormalize
        state0 = (projector0 @ state) / sqrt(prob_0)
    return (state0, state1, prob_0)
@lru_cache(maxsize=1000)
def measure_multiple_with_cache_ket(state: Tuple[complex], num_states: int, length_diff: int) -> Tuple[
        Tuple[Tuple[complex]], Tuple[float]]:
    """Jointly measure the first ``num_states`` qubits of a ket.

    Returns (collapsed states, probabilities), one entry per computational
    basis outcome; a collapsed state is ``None`` when its outcome has zero
    probability.  ``length_diff`` gives the number of trailing qubits left
    unmeasured.  Results are cached on the hashable tuple arguments.
    """
    ket = array(state)
    basis_count = 2 ** num_states
    # identity acting on the unmeasured tail qubits (hoisted out of the loop)
    tail_identity = identity(2 ** length_diff)
    projectors = []
    probabilities = []
    for outcome in range(basis_count):
        # measurement operator selecting a single basis outcome
        row = zeros((1, basis_count), dtype=complex)
        row[0, outcome] = 1
        proj = kron(row, tail_identity)  # projector for this outcome
        prob = (ket.conj().T @ proj.T @ proj @ ket).real
        # clamp numerical noise into the valid probability range [0, 1]
        prob = min(max(prob, 0), 1)
        projectors.append(proj)
        probabilities.append(prob)
    # collapse onto each possible outcome; impossible outcomes stay None
    collapsed = [
        tuple((proj @ ket) / sqrt(prob)) if prob > 0 else None
        for proj, prob in zip(projectors, probabilities)
    ]
    return (tuple(collapsed), tuple(probabilities))
@lru_cache(maxsize=1000)
def measure_state_with_cache_density(state: Tuple[Tuple[complex, complex]]) -> float:
    """Return the probability of measuring |0> for a single-qubit density matrix.

    Cached on the hashable tuple-of-tuples form of the matrix.
    """
    rho = array(state)
    # projector onto the |0> basis state
    projector_zero = array([[1, 0], [0, 0]], dtype=complex)
    # Born rule for density matrices: p(0) = Tr(rho P0)
    return trace(rho @ projector_zero).real
@lru_cache(maxsize=1000)
def measure_entangled_state_with_cache_density(state: Tuple[Tuple[complex]], state_index: int, num_states: int) -> Tuple[
        Tuple[complex], Tuple[complex], float]:
    """Measure qubit ``state_index`` of a ``num_states``-qubit density matrix.

    Returns (rho conditioned on outcome 0, rho conditioned on outcome 1,
    probability of outcome 0).  A conditional state is ``None`` when its
    outcome has (numerically) zero probability.  Cached on the hashable
    tuple arguments.
    """
    rho = array(state)
    # Assemble P0/P1 as Kronecker products: |0><0| (resp. |1><1|) at the
    # measured qubit, identity on every other qubit.
    proj_zero, proj_one = [1], [1]
    for qubit in range(num_states):
        if qubit == state_index:
            proj_zero = kron(proj_zero, [[1, 0], [0, 0]])
            proj_one = kron(proj_one, [[0, 0], [0, 1]])
        else:
            proj_zero = kron(proj_zero, identity(2))
            proj_one = kron(proj_one, identity(2))
    # Born rule: p(0) = Tr(rho P0)
    prob_0 = trace(rho @ proj_zero).real
    # collapse and renormalize each branch, skipping impossible outcomes
    state0 = None if prob_0 <= 0 else (proj_zero @ rho @ proj_zero) / prob_0
    state1 = None if prob_0 >= 1 else (proj_one @ rho @ proj_one) / (1 - prob_0)
    return (state0, state1, prob_0)
@lru_cache(maxsize=1000)
def measure_multiple_with_cache_density(state: Tuple[Tuple[complex]], num_states: int, length_diff: int) -> Tuple[
        Tuple[Tuple[complex]], Tuple[float]]:
    """Jointly measure the first ``num_states`` qubits of a density matrix.

    Returns (collapsed states, probabilities), one entry per computational
    basis outcome; a collapsed state is ``None`` when its outcome has zero
    probability.  ``length_diff`` gives the number of trailing qubits left
    unmeasured.  Results are cached on the hashable tuple arguments.
    """
    rho = array(state)
    basis_count = 2 ** num_states
    # identity acting on the unmeasured tail qubits (hoisted out of the loop)
    tail_identity = identity(2 ** length_diff)
    projectors = []
    probabilities = []
    for outcome in range(basis_count):
        # measurement operator selecting a single basis outcome
        marker = zeros((basis_count, basis_count), dtype=complex)
        marker[outcome, outcome] = 1
        proj = kron(marker, tail_identity)  # projector for this outcome
        prob = trace(rho @ proj).real  # Born rule: p = Tr(rho P)
        # clamp numerical noise into the valid probability range [0, 1]
        prob = min(max(prob, 0), 1)
        projectors.append(proj)
        probabilities.append(prob)
    # collapse onto each possible outcome; impossible outcomes stay None
    collapsed = [
        tuple((proj @ rho @ proj) / prob) if prob > 0 else None
        for proj, prob in zip(projectors, probabilities)
    ]
    return (tuple(collapsed), tuple(probabilities))
| 32.819355
| 121
| 0.633772
| 641
| 5,087
| 4.889236
| 0.146646
| 0.025526
| 0.037971
| 0.036375
| 0.874601
| 0.858647
| 0.856094
| 0.818762
| 0.807913
| 0.774091
| 0
| 0.037464
| 0.249656
| 5,087
| 154
| 122
| 33.032468
| 0.7836
| 0.116572
| 0
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1d3b5ad5e6bf1d49d5a8fcfeff02ac85738be20c
| 84
|
py
|
Python
|
glue/plugins/data_factories/spectral_cube/__init__.py
|
ejeschke/glue
|
21689e3474aeaeb70e258d76c60755596856976c
|
[
"BSD-3-Clause"
] | 3
|
2015-09-10T22:23:55.000Z
|
2019-04-04T18:47:33.000Z
|
glue/plugins/data_factories/spectral_cube/__init__.py
|
ejeschke/glue
|
21689e3474aeaeb70e258d76c60755596856976c
|
[
"BSD-3-Clause"
] | null | null | null |
glue/plugins/data_factories/spectral_cube/__init__.py
|
ejeschke/glue
|
21689e3474aeaeb70e258d76c60755596856976c
|
[
"BSD-3-Clause"
] | null | null | null |
def setup():
    # Plugin entry point called by glue's plugin loader.  Importing the
    # factory functions is done for its side effect: presumably decorators
    # in .spectral_cube register them with glue's data-factory registry
    # on import -- TODO confirm against the spectral_cube module.
    from .spectral_cube import read_spectral_cube, parse_spectral_cube
| 28
| 70
| 0.809524
| 12
| 84
| 5.25
| 0.666667
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 84
| 2
| 71
| 42
| 0.863014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d516ba461eedd1ebb8d2c4ea693b31cf35454bae
| 52
|
py
|
Python
|
envs/gap_env/door_env/__init__.py
|
Wangweiyao/causal-manipulation
|
8e695a33e5d7cf32ce0d878dd66e5a57fde76b84
|
[
"MIT"
] | null | null | null |
envs/gap_env/door_env/__init__.py
|
Wangweiyao/causal-manipulation
|
8e695a33e5d7cf32ce0d878dd66e5a57fde76b84
|
[
"MIT"
] | null | null | null |
envs/gap_env/door_env/__init__.py
|
Wangweiyao/causal-manipulation
|
8e695a33e5d7cf32ce0d878dd66e5a57fde76b84
|
[
"MIT"
] | null | null | null |
from gap_env.door_env.door_env import SawyerDoorEnv
| 26
| 51
| 0.884615
| 9
| 52
| 4.777778
| 0.666667
| 0.325581
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 52
| 1
| 52
| 52
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d525e71edfb7974fcb264a2a99f967ec78a747f2
| 41,323
|
py
|
Python
|
kubernetes_env/benchmark/train_ticket_benchmark.py
|
dyn-tracing/tracing_env
|
80406b0422c495a785e2323948858526d3e40875
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_env/benchmark/train_ticket_benchmark.py
|
dyn-tracing/tracing_env
|
80406b0422c495a785e2323948858526d3e40875
|
[
"Apache-2.0"
] | 18
|
2021-01-27T17:16:13.000Z
|
2021-04-28T12:50:26.000Z
|
kubernetes_env/benchmark/train_ticket_benchmark.py
|
dyn-tracing/tracing_env
|
80406b0422c495a785e2323948858526d3e40875
|
[
"Apache-2.0"
] | 2
|
2021-02-04T04:10:38.000Z
|
2021-04-29T06:41:30.000Z
|
from locust import HttpUser, task, between, constant
from datetime import datetime, timedelta, date
from random import randint
import random
import json
import uuid
import numpy as np
import logging
import sys
import time
import os
import string
import logging
from requests.adapters import HTTPAdapter
import locust.stats
locust.stats.CONSOLE_STATS_INTERVAL_SEC = 1
locust.stats.CSV_STATS_FLUSH_INTERVAL_SEC = 10
locust.stats.PERCENTILES_TO_REPORT = [0.25, 0.50, 0.75, 0.80, 0.90, 0.95, 0.98, 0.99, 0.999, 0.9999, 1.0]
DEP_DATE = "2021-01-08"
VERBOSE_LOGGING = ${LOCUST_VERBOSE_LOGGING}
def matrix_checker(matrix):
    """Return True when every row of ``matrix`` has the same sum.

    Used to validate that the request-transition matrix is (row-)stochastic
    before generating request sequences.
    """
    # fix: the original bound the row sums to `sum`, shadowing the builtin
    row_sums = np.sum(matrix, axis=1).tolist()
    # all row sums are equal iff the list shifted by one equals itself
    return row_sums[1:] == row_sums[:-1]
def sequence_generator(matrix, all_functions):
    """Random-walk over the transition matrix, returning the visited names.

    Starts at node 0 and takes at most 20 weighted steps; an absorbing node
    (whose first 1-entry points back at itself) ends the walk early.
    Raises Exception when the matrix rows do not all sum to the same value.
    """
    if not matrix_checker(matrix):
        raise Exception("Matrix is not correct")
    walk = [all_functions[0]]
    node = 0
    for _ in range(20):
        row = matrix[node]
        # absorbing state: the (first) 1 in this row is on the diagonal
        if 1 in row and row.tolist().index(1) == node:
            break
        step = random.choices(population=all_functions, weights=row)[0]
        walk.append(step)
        node = all_functions.index(step)
    return walk
def random_string_generator():
    """Return a random string of 8-16 characters, or '' about 25% of the time.

    The character pool widens by quartile: letters only (<25), letters +
    digits (<50), letters + digits + punctuation (<75), otherwise the empty
    string.  Used to fabricate both valid-looking and garbage request fields.
    """
    # fix: the original bound the length to `len`, shadowing the builtin
    length = randint(8, 16)
    pool_selector = randint(0, 100)
    if pool_selector < 25:
        pool = string.ascii_letters
    elif pool_selector < 50:
        pool = string.ascii_letters + string.digits
    elif pool_selector < 75:
        pool = string.ascii_letters + string.digits + string.punctuation
    else:
        return ''
    return ''.join(random.choice(pool) for _ in range(length))
def random_date_generator():
    """Return a pseudo-random 'YYYY-M-D' date string with years 2000-2049.

    The day is drawn from 1-31 regardless of month, so the date may be
    deliberately invalid (e.g. 30 February).
    """
    decade = randint(0, 4)
    year = 2000 + decade * 10 + randint(0, 9)
    month = randint(1, 12)
    day = randint(1, 31)
    return str(year) + '-' + str(month) + '-' + str(day)
def postfix(expected = True):
    """Return the request-label suffix for expected vs. unexpected outcomes."""
    return '_expected' if expected else '_unexpected'
class Requests():
def __init__(self, client):
self.client = client
dir_path = os.path.dirname(os.path.realpath(__file__))
handler = logging.FileHandler(os.path.join(dir_path, "locustfile_debug.log"))
# handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
if VERBOSE_LOGGING==1:
logger = logging.getLogger("Debugging logger")
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.debugging_logger = logger
else:
self.debugging_logger = None
def log_verbose(self, to_log):
if self.debugging_logger!=None:
self.debugging_logger.debug(json.dumps(to_log))
def home(self, expected):
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
with self.client.get('/index.html', name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code, 'response_time': time.time() - start_time}
self.log_verbose(to_log)
def try_to_read_response_as_json(self, response):
try:
return response.json()
except:
try:
return response.content.decode('utf-8')
except:
return response.content
def search_ticket(self, departure_date, from_station, to_station, expected = True):
head = {"Accept": "application/json",
"Content-Type": "application/json"}
body_start = {
"startingPlace": from_station,
"endPlace": to_station,
"departureTime": departure_date
}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
with self.client.post(
url = "/api/v1/travelservice/trips/left",
headers = head,
json = body_start,
catch_response = True,
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
def search_departure(self, expected):
if(expected):
self.search_ticket(date.today().strftime(random_date_generator()), "Shang Hai", "Su Zhou", expected)
else:
self.search_ticket(date.today().strftime(random_date_generator()), random_string_generator(), "Su Zhou", expected)
def search_return(self, expected):
if(expected):
self.search_ticket(date.today().strftime(random_date_generator()), "Su Zhou", "Shang Hai", expected)
else:
self.search_ticket(date.today().strftime(random_date_generator()), random_string_generator(), "Shang Hai", expected)
def _create_user(self, expected):
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
document_num = None
with self.client.post(url = "/api/v1/adminuserservice/users",
headers = {
"Authorization": self.bearer, "Accept": "application/json", "Content-Type": "application/json"},
json = {"documentNum": document_num, "documentType": 0, "email": "string", "gender": 0, "password": self.user_name, "userName": self.user_name},
name = req_label) as response2:
to_log = {'name': req_label, 'expected': expected, 'status_code': response2.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response2)}
self.log_verbose(to_log)
def _navigate_to_client_login(self, expected = True):
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
with self.client.get('/client_login.html', name = req_label) as response:
to_log = {'name': req_label, 'expected': True, 'status_code': response.status_code, 'response_time': time.time() - start_time}
self.log_verbose(to_log)
def login(self, expected):
self._create_user(True)
self._navigate_to_client_login()
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
head = {"Accept": "application/json",
"Content-Type": "application/json"}
if(expected):
response = self.client.post(url = "/api/v1/users/login",
headers = head,
json = {
"username": self.user_name,
"password": self.user_name
}, name = req_label)
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
else:
response = self.client.post(url = "/api/v1/users/login",
headers = head,
json = {
"username": self.user_name,
# wrong password
"password": random_string_generator()
}, name = req_label)
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
response_as_json = response.json()["data"]
if response_as_json is not None:
token = response_as_json["token"]
self.bearer = "Bearer " + token
self.user_id = response_as_json["userId"]
# purchase ticket
def start_booking(self, expected):
departure_date = DEP_DATE
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
with self.client.get(
url = "/client_ticket_book.html?tripId=D1345&from=Shang%20Hai&to=Su%20Zhou&seatType=2&seat_price=50.0&date=" + departure_date,
headers = head,
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code, 'response_time': time.time() - start_time}
self.log_verbose(to_log)
def get_assurance_types(self, expected):
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
with self.client.get(
url = "/api/v1/assuranceservice/assurances/types",
headers = head,
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
def get_foods(self, expected):
departure_date = DEP_DATE
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
with self.client.get(
url = "/api/v1/foodservice/foods/" + departure_date + "/Shang%20Hai/Su%20Zhou/D1345",
headers = head,
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
def select_contact(self, expected):
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
response_contacts = self.client.get(
url = "/api/v1/contactservice/contacts/account/" + self.user_id,
headers = head,
name = req_label)
to_log = {'name': req_label, 'expected': expected, 'status_code': response_contacts.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response_contacts)}
self.log_verbose(to_log)
response_as_json_contacts = response_contacts.json()["data"]
if len(response_as_json_contacts) == 0:
req_label = 'set_new_contact' + postfix(expected)
response_contacts = self.client.post(
url="/api/v1/contactservice/contacts",
headers=head,
json = {
"name": self.user_id, "accountId": self.user_id, "documentType": "1", "documentNumber": self.user_id, "phoneNumber": "123456"},
name = req_label)
response_as_json_contacts = response_contacts.json()["data"]
self.contactid = response_as_json_contacts["id"]
else:
self.contactid = response_as_json_contacts[0]["id"]
def finish_booking(self, expected):
departure_date = DEP_DATE
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
if(expected):
body_for_reservation = {
"accountId": self.user_id,
"contactsId": self.contactid,
"tripId": "D1345",
"seatType": "2",
"date": departure_date,
"from": "Shang Hai",
"to": "Su Zhou",
"assurance": "0",
"foodType": 1,
"foodName": "Bone Soup",
"foodPrice": 2.5,
"stationName": "",
"storeName": ""
}
else:
body_for_reservation = {
"accountId": self.user_id,
"contactsId": self.contactid,
"tripId": random_string_generator(),
"seatType": "2",
"date": departure_date,
"from": "Shang Hai",
"to": "Su Zhou",
"assurance": "0",
"foodType": 1,
"foodName": "Bone Soup",
"foodPrice": 2.5,
"stationName": "",
"storeName": ""
}
start_time = time.time()
with self.client.post(
url = "/api/v1/preserveservice/preserve",
headers = head,
json = body_for_reservation,
catch_response = True,
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
def select_order(self, expected):
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
response_order_refresh = self.client.post(
url = "/api/v1/orderservice/order/refresh",
name = req_label,
headers = head,
json = {
"loginId": self.user_id, "enableStateQuery": "false", "enableTravelDateQuery": "false", "enableBoughtDateQuery": "false", "travelDateStart": "null", "travelDateEnd": "null", "boughtDateStart": "null", "boughtDateEnd": "null"})
to_log = {'name': req_label, 'expected': expected, 'status_code': response_order_refresh.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response_order_refresh)}
self.log_verbose(to_log)
response_as_json = response_order_refresh.json()["data"]
self.order_id = response_as_json[0]["id"]
def pay(self, expected):
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
if(expected):
with self.client.post(
url = "/api/v1/inside_pay_service/inside_payment",
headers = head,
json = {"orderId": self.order_id, "tripId": "D1345"},
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
else:
with self.client.post(
url = "/api/v1/inside_pay_service/inside_payment",
headers = head,
json = {"orderId": random_string_generator(), "tripId": "D1345"},
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
# cancelNoRefund
def cancel_with_no_refund(self, expected):
head = {"Accept": "application/json",
"Content-Type": "application/json", "Authorization": self.bearer}
req_label = sys._getframe().f_code.co_name + postfix(expected)
start_time = time.time()
if(expected):
with self.client.get(
url = "/api/v1/cancelservice/cancel/" + self.order_id + "/" + self.user_id,
headers = head,
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
else:
with self.client.get(
url = "/api/v1/cancelservice/cancel/" + self.order_id + "/" + random_string_generator(),
headers = head,
name = req_label) as response:
to_log = {'name': req_label, 'expected': expected, 'status_code': response.status_code,
'response_time': time.time() - start_time, 'response': self.try_to_read_response_as_json(response)}
self.log_verbose(to_log)
# user refund with voucher
def get_voucher(self, expected):
    """Request a refund voucher for the current order.

    When *expected* is truthy the stored order id is used; otherwise a
    random order id is posted to exercise the error path.

    Fix: the unexpected branch previously omitted the ``'response'`` field
    from the verbose log entry, unlike every other request helper in this
    class; both branches now log the same fields.
    """
    head = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": self.bearer,
    }
    req_label = sys._getframe().f_code.co_name + postfix(expected)
    start_time = time.time()
    order_id = self.order_id if expected else random_string_generator()
    with self.client.post(
            url="/getVoucher",
            headers=head,
            json={"orderId": order_id, "type": 1},
            name=req_label) as response:
        to_log = {'name': req_label, 'expected': expected,
                  'status_code': response.status_code,
                  'response_time': time.time() - start_time,
                  'response': self.try_to_read_response_as_json(response)}
        self.log_verbose(to_log)
# consign ticket
def get_consigns(self, expected):
    """Fetch the consign (luggage delivery) entries for the current order.

    Note: unlike the other helpers, this request is identical for the
    expected and unexpected cases; *expected* only labels the log entry.
    """
    request_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": self.bearer,
    }
    req_label = sys._getframe().f_code.co_name + postfix(expected)
    start_time = time.time()
    target = "/api/v1/consignservice/consigns/order/" + self.order_id
    with self.client.get(
            url=target,
            headers=request_headers,
            name=req_label) as response:
        self.log_verbose({
            'name': req_label,
            'expected': expected,
            'status_code': response.status_code,
            'response_time': time.time() - start_time,
            'response': self.try_to_read_response_as_json(response),
        })
def confirm_consign(self, expected):
    """PUT a consign (luggage delivery) order for the current booking.

    The expected path reuses the stored order id as consignee with a fixed
    phone number; the unexpected path submits random consignee/phone values.
    """
    request_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": self.bearer,
    }
    req_label = sys._getframe().f_code.co_name + postfix(expected)
    start_time = time.time()
    if expected:
        consignee, phone = self.order_id, "123"
    else:
        consignee, phone = random_string_generator(), random_string_generator()
    payload = {
        "accountId": self.user_id,
        "handleDate": DEP_DATE,
        "from": "Shang Hai",
        "to": "Su Zhou",
        "orderId": self.order_id,
        "consignee": consignee,
        "phone": phone,
        "weight": "1",
        "id": "",
        "isWithin": "false",
    }
    response_as_json_consign = self.client.put(
        url="/api/v1/consignservice/consigns",
        name=req_label,
        json=payload,
        headers=request_headers)
    self.log_verbose({
        'name': req_label,
        'expected': expected,
        'status_code': response_as_json_consign.status_code,
        'response_time': time.time() - start_time,
        'response': self.try_to_read_response_as_json(response_as_json_consign),
    })
def perform_task(self, name):
    """Dispatch *name* to the matching request method on this object.

    ``name`` carries an ``_expected`` or ``_unexpected`` suffix; the suffix
    selects the boolean argument and the remainder selects the method.
    """
    base_name = name.replace("_expected", "").replace("_unexpected", "")
    handler = getattr(self, base_name)
    handler(name.endswith('_expected'))
class UserNoLogin(HttpUser):
    """Locust user that only browses (home + searches) without logging in."""

    weight = 1
    wait_time = constant(1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Enlarge the HTTP connection pool so many concurrent users fit.
        for scheme in ('https://', 'http://'):
            self.client.mount(scheme, HTTPAdapter(pool_maxsize=50))

    @task
    def perfom_task(self):  # NOTE(review): name keeps its historical typo; registered via @task, so renaming is not required.
        logging.debug("Running user 'no login'...")
        all_functions = ["home_expected", "search_departure_expected",
                         "search_departure_unexpected", "search_return_expected", "search_return_unexpected"]
        # Markov transition probabilities between user actions.
        transitions = {
            ("home_expected", "search_departure_expected"): 0.8,
            ("home_expected", "search_departure_unexpected"): 0.2,
            ("search_departure_expected", "search_return_expected"): 0.8,
            ("search_departure_expected", "search_return_unexpected"): 0.2,
            ("search_departure_unexpected", "search_departure_expected"): 0.9,
            ("search_departure_unexpected", "search_departure_unexpected"): 0.1,
            ("search_return_expected", "search_return_expected"): 1,
            ("search_return_unexpected", "search_return_expected"): 0.9,
            ("search_return_unexpected", "search_return_unexpected"): 0.1,
        }
        matrix = np.zeros((len(all_functions), len(all_functions)))
        for (src, dst), prob in transitions.items():
            matrix[all_functions.index(src), all_functions.index(dst)] = prob
        task_sequence = sequence_generator(matrix, all_functions)
        requests = Requests(self.client)
        # Warm-up actions issued before the generated sequence.
        for fixed in ("home_expected", "search_departure_expected", "search_return_expected"):
            requests.perform_task(fixed)
        for task_name in task_sequence:
            requests.perform_task(task_name)
class UserBooking(HttpUser):
    """Locust user that logs in, books a trip, and pays for it."""

    weight = 1
    wait_time = constant(1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Enlarge the HTTP connection pool so many concurrent users fit.
        for scheme in ('https://', 'http://'):
            self.client.mount(scheme, HTTPAdapter(pool_maxsize=50))

    @task
    def perform_task(self):
        logging.debug("Running user 'booking'...")
        all_functions = [
            "home_expected",
            "login_expected",
            "login_unexpected",
            "search_departure_expected",
            "search_departure_unexpected",
            "start_booking_expected",
            "get_assurance_types_expected",
            "get_foods_expected",
            "select_contact_expected",
            "finish_booking_expected",
            "finish_booking_unexpected",
            "select_order_expected",
            "pay_expected",
            "pay_unexpected",
        ]
        # Markov transition probabilities between user actions.
        transitions = {
            ("home_expected", "login_expected"): 0.9,
            ("home_expected", "login_unexpected"): 0.1,
            ("login_unexpected", "login_unexpected"): 0.02,
            ("login_unexpected", "login_expected"): 0.98,
            ("login_expected", "search_departure_expected"): 0.9,  # 0.8
            ("login_expected", "search_departure_unexpected"): 0.1,  # 0.2
            ("search_departure_unexpected", "search_departure_expected"): 0.95,
            ("search_departure_unexpected", "search_departure_unexpected"): 0.05,
            ("search_departure_expected", "start_booking_expected"): 1,
            ("start_booking_expected", "get_assurance_types_expected"): 1,
            ("get_assurance_types_expected", "get_foods_expected"): 1,
            ("get_foods_expected", "select_contact_expected"): 1,
            ("select_contact_expected", "finish_booking_expected"): 0.8,
            ("select_contact_expected", "finish_booking_unexpected"): 0.2,
            ("finish_booking_unexpected", "finish_booking_expected"): 0.95,
            ("finish_booking_unexpected", "finish_booking_unexpected"): 0.05,
            ("finish_booking_expected", "select_order_expected"): 1,
            ("select_order_expected", "pay_expected"): 0.8,
            ("select_order_expected", "pay_unexpected"): 0.2,
            ("pay_expected", "pay_expected"): 1,
            ("pay_unexpected", "pay_expected"): 0.95,
            ("pay_unexpected", "pay_unexpected"): 0.05,
        }
        matrix = np.zeros((len(all_functions), len(all_functions)))
        for (src, dst), prob in transitions.items():
            matrix[all_functions.index(src), all_functions.index(dst)] = prob
        task_sequence = sequence_generator(matrix, all_functions)
        requests = Requests(self.client)
        for task_name in task_sequence:
            requests.perform_task(task_name)
class UserConsignTicket(HttpUser):
    """Locust user that books, pays, and then consigns luggage."""

    weight = 0
    wait_time = constant(1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Enlarge the HTTP connection pool so many concurrent users fit.
        for scheme in ('https://', 'http://'):
            self.client.mount(scheme, HTTPAdapter(pool_maxsize=50))

    @task
    def perform_task(self):
        logging.debug("Running user 'consign ticket'...")
        all_functions = [
            "home_expected",
            "login_expected",
            "login_unexpected",
            "search_departure_expected",
            "search_departure_unexpected",
            "start_booking_expected",
            "get_assurance_types_expected",
            "get_foods_expected",
            "select_contact_expected",
            "finish_booking_expected",
            "finish_booking_unexpected",
            "select_order_expected",
            "pay_expected",
            "pay_unexpected",
            "get_consigns_expected",
            "confirm_consign_expected",
            "confirm_consign_unexpected"
        ]
        # Markov transition probabilities; trailing comments keep the
        # original baseline values for reference.
        transitions = {
            ("home_expected", "login_expected"): 0.8,  # 0.9
            ("home_expected", "login_unexpected"): 0.2,  # 0.1
            ("login_unexpected", "login_unexpected"): 0.15,  # 0.02
            ("login_unexpected", "login_expected"): 0.85,  # 0.98
            ("login_expected", "search_departure_expected"): 0.7,  # 0.8
            ("login_expected", "search_departure_unexpected"): 0.3,  # 0.2
            ("search_departure_unexpected", "search_departure_expected"): 0.85,  # 0.95
            ("search_departure_unexpected", "search_departure_unexpected"): 0.15,  # 0.05
            ("search_departure_expected", "start_booking_expected"): 1,
            ("start_booking_expected", "get_assurance_types_expected"): 1,
            ("get_assurance_types_expected", "get_foods_expected"): 1,
            ("get_foods_expected", "select_contact_expected"): 1,
            ("select_contact_expected", "finish_booking_expected"): 0.75,  # 0.8
            ("select_contact_expected", "finish_booking_unexpected"): 0.25,  # 0.2
            ("finish_booking_unexpected", "finish_booking_expected"): 0.9,  # 0.95
            ("finish_booking_unexpected", "finish_booking_unexpected"): 0.1,  # 0.05
            ("finish_booking_expected", "select_order_expected"): 1,
            ("select_order_expected", "pay_expected"): 0.7,  # 0.8
            ("select_order_expected", "pay_unexpected"): 0.3,  # 0.2
            ("pay_expected", "get_consigns_expected"): 1,
            ("pay_unexpected", "pay_expected"): 0.85,  # 0.95
            ("pay_unexpected", "pay_unexpected"): 0.15,  # 0.05
            ("get_consigns_expected", "confirm_consign_expected"): 0.8,  # 0.9
            ("get_consigns_expected", "confirm_consign_unexpected"): 0.2,  # 0.1
            ("confirm_consign_unexpected", "confirm_consign_expected"): 0.9,  # 0.95
            ("confirm_consign_unexpected", "confirm_consign_unexpected"): 0.1,  # 0.05
            ("confirm_consign_expected", "confirm_consign_expected"): 1,
        }
        matrix = np.zeros((len(all_functions), len(all_functions)))
        for (src, dst), prob in transitions.items():
            matrix[all_functions.index(src), all_functions.index(dst)] = prob
        task_sequence = sequence_generator(matrix, all_functions)
        requests = Requests(self.client)
        for task_name in task_sequence:
            requests.perform_task(task_name)
class UserCancelNoRefund(HttpUser):
    """Locust user that books, pays, and then cancels without a refund."""

    weight = 1
    wait_time = constant(1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Enlarge the HTTP connection pool so many concurrent users fit.
        for scheme in ('https://', 'http://'):
            self.client.mount(scheme, HTTPAdapter(pool_maxsize=50))

    @task
    def perform_task(self):
        logging.debug("Running user 'cancel no refund'...")
        all_functions = [
            "home_expected",
            "login_expected",
            "login_unexpected",
            "search_departure_expected",
            "search_departure_unexpected",
            "start_booking_expected",
            "get_assurance_types_expected",
            "get_foods_expected",
            "select_contact_expected",
            "finish_booking_expected",
            "finish_booking_unexpected",
            "select_order_expected",
            "pay_expected",
            "pay_unexpected",
            "cancel_with_no_refund_expected",
            "cancel_with_no_refund_unexpected"
        ]
        # Markov transition probabilities; trailing comments keep the
        # original baseline values for reference.
        transitions = {
            ("home_expected", "login_expected"): 0.99,  # 0.9
            ("home_expected", "login_unexpected"): 0.01,  # 0.1
            ("login_unexpected", "login_unexpected"): 0.001,  # 0.02
            ("login_unexpected", "login_expected"): 0.999,  # 0.98
            ("login_expected", "search_departure_expected"): 0.9,  # 0.8
            ("login_expected", "search_departure_unexpected"): 0.1,  # 0.2
            ("search_departure_unexpected", "search_departure_expected"): 0.99,  # 0.95
            ("search_departure_unexpected", "search_departure_unexpected"): 0.01,  # 0.05
            ("search_departure_expected", "start_booking_expected"): 1,
            ("start_booking_expected", "get_assurance_types_expected"): 1,
            ("get_assurance_types_expected", "get_foods_expected"): 1,
            ("get_foods_expected", "select_contact_expected"): 1,
            ("select_contact_expected", "finish_booking_expected"): 0.99,  # 0.8
            ("select_contact_expected", "finish_booking_unexpected"): 0.01,  # 0.2
            ("finish_booking_unexpected", "finish_booking_expected"): 0.99,  # 0.95
            ("finish_booking_unexpected", "finish_booking_unexpected"): 0.01,  # 0.05
            ("finish_booking_expected", "select_order_expected"): 1,
            ("select_order_expected", "pay_expected"): 0.99,  # 0.8
            ("select_order_expected", "pay_unexpected"): 0.01,  # 0.2
            ("pay_expected", "cancel_with_no_refund_expected"): 0.99,  # 0.8
            ("pay_expected", "cancel_with_no_refund_unexpected"): 0.01,  # 0.2
            ("pay_unexpected", "pay_expected"): 0.99,  # 0.95
            ("pay_unexpected", "pay_unexpected"): 0.01,  # 0.05
            ("cancel_with_no_refund_expected", "cancel_with_no_refund_expected"): 1,
            ("cancel_with_no_refund_unexpected", "cancel_with_no_refund_expected"): 0.99,  # 0.95
            ("cancel_with_no_refund_unexpected", "cancel_with_no_refund_unexpected"): 0.01,  # 0.05
        }
        matrix = np.zeros((len(all_functions), len(all_functions)))
        for (src, dst), prob in transitions.items():
            matrix[all_functions.index(src), all_functions.index(dst)] = prob
        task_sequence = sequence_generator(matrix, all_functions)
        requests = Requests(self.client)
        for task_name in task_sequence:
            requests.perform_task(task_name)
class UserRefundVoucher(HttpUser):
    """Locust user that books, pays, and then requests a refund voucher.

    Fix: the debug log message previously read "refound voucher"; it now
    says "refund voucher" to match the scenario name.
    """

    weight = 0
    wait_time = constant(1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Enlarge the HTTP connection pool so many concurrent users fit.
        for scheme in ('https://', 'http://'):
            self.client.mount(scheme, HTTPAdapter(pool_maxsize=50))

    @task
    def perform_task(self):
        logging.debug("Running user 'refund voucher'...")
        all_functions = [
            "home_expected",
            "login_expected",
            "login_unexpected",
            "search_departure_expected",
            "search_departure_unexpected",
            "start_booking_expected",
            "get_assurance_types_expected",
            "get_foods_expected",
            "select_contact_expected",
            "finish_booking_expected",
            "finish_booking_unexpected",
            "select_order_expected",
            "pay_expected",
            "pay_unexpected",
            "get_voucher_expected",
            "get_voucher_unexpected"
        ]
        # Markov transition probabilities; trailing comments keep the
        # original baseline values for reference.
        transitions = {
            ("home_expected", "login_expected"): 0.85,  # 0.9
            ("home_expected", "login_unexpected"): 0.15,  # 0.1
            ("login_unexpected", "login_unexpected"): 0.1,  # 0.02
            ("login_unexpected", "login_expected"): 0.9,  # 0.98
            ("login_expected", "search_departure_expected"): 0.85,  # 0.8
            ("login_expected", "search_departure_unexpected"): 0.15,  # 0.2
            ("search_departure_unexpected", "search_departure_expected"): 0.9,  # 0.95
            ("search_departure_unexpected", "search_departure_unexpected"): 0.1,  # 0.05
            ("search_departure_expected", "start_booking_expected"): 1,
            ("start_booking_expected", "get_assurance_types_expected"): 1,
            ("get_assurance_types_expected", "get_foods_expected"): 1,
            ("get_foods_expected", "select_contact_expected"): 1,
            ("select_contact_expected", "finish_booking_expected"): 0.8,
            ("select_contact_expected", "finish_booking_unexpected"): 0.2,
            ("finish_booking_unexpected", "finish_booking_expected"): 0.95,
            ("finish_booking_unexpected", "finish_booking_unexpected"): 0.05,
            ("finish_booking_expected", "select_order_expected"): 1,
            ("select_order_expected", "pay_expected"): 0.8,
            ("select_order_expected", "pay_unexpected"): 0.2,
            ("pay_expected", "get_voucher_expected"): 0.8,
            ("pay_expected", "get_voucher_unexpected"): 0.2,
            ("pay_unexpected", "pay_expected"): 0.9,  # 0.95
            ("pay_unexpected", "pay_unexpected"): 0.1,  # 0.05
            ("get_voucher_expected", "get_voucher_expected"): 1,
            ("get_voucher_unexpected", "get_voucher_expected"): 0.85,  # 0.95
            ("get_voucher_unexpected", "get_voucher_unexpected"): 0.15,  # 0.05
        }
        matrix = np.zeros((len(all_functions), len(all_functions)))
        for (src, dst), prob in transitions.items():
            matrix[all_functions.index(src), all_functions.index(dst)] = prob
        task_sequence = sequence_generator(matrix, all_functions)
        requests = Requests(self.client)
        for task_name in task_sequence:
            requests.perform_task(task_name)
| 48.330994
| 246
| 0.628076
| 4,696
| 41,323
| 5.215077
| 0.070273
| 0.119559
| 0.15341
| 0.103307
| 0.836137
| 0.825888
| 0.811392
| 0.794692
| 0.780686
| 0.765864
| 0
| 0.018545
| 0.24836
| 41,323
| 854
| 247
| 48.387588
| 0.769929
| 0.01118
| 0
| 0.604993
| 0
| 0.001468
| 0.239516
| 0.124488
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004405
| 0.022026
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5603cfd85727d4dfbde655ee3d1f81489ac2bc6
| 9,155
|
py
|
Python
|
test/promises/test_2_2_3.py
|
MeerkatLabs/sleekpromises
|
f31d2bf4ae57fa30d77e6aa0c91b146131d599e6
|
[
"BSD-3-Clause"
] | null | null | null |
test/promises/test_2_2_3.py
|
MeerkatLabs/sleekpromises
|
f31d2bf4ae57fa30d77e6aa0c91b146131d599e6
|
[
"BSD-3-Clause"
] | 1
|
2020-04-10T22:02:55.000Z
|
2020-04-10T22:02:55.000Z
|
test/promises/test_2_2_3.py
|
MeerkatLabs/sleekpromises
|
f31d2bf4ae57fa30d77e6aa0c91b146131d599e6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
2.2.3: If `onRejected` is a function,
https://github.com/promises-aplus/promises-tests/blob/2.1.1/lib/tests/2.2.3.js
"""
import threading
from sleekxmpp.test import SleekTest
class Promise_2_2_3_1_TestCase(SleekTest):
"""
2.2.3.1: it must be called after `promise` is rejected, with `promise`'s rejection reason as its first argument.

Covers three timings: rejection before `then`, immediately after `then`,
and from a scheduler task on another thread. Each test synchronizes on a
threading.Event because callbacks run on the scheduler's thread.
"""
dummy = {'dummy': 'dummy'}
sentinel = {'sentinel': 'sentinel'}
def setUp(self):
# Register the promise plugin and start a stream with the scheduler.
from sleekpromises import register_sleek_promises
register_sleek_promises()
self.session = {}
self.stream_start(plugins=['sleekpromises_scheduler', ])
self.scheduler = self.xmpp['sleekpromises_scheduler']
def tearDown(self):
self.stream_close()
def test_already_rejected(self):
# Reject first, attach callbacks afterwards: onRejected must still fire.
self.session['called'] = False
event = threading.Event()
def rejected_called(arg):
self.session['called'] = True
# The rejection reason must be the exact sentinel object (identity).
self.assertIs(self.sentinel, arg)
event.set()
def fulfilled_called(arg):
# onFulfilled must never run for a rejected promise.
self.assertFalse(self.session['called'])
# Create a promise and resolve it
promise = self.scheduler.promise()
promise.rejected(self.sentinel)
promise.then(fulfilled_called, rejected_called)
self.assertTrue(event.wait(1.0))
self.assertTrue(self.session['called'])
def test_immediately_rejected(self):
# Attach callbacks first, reject immediately afterwards.
self.session['called'] = False
event = threading.Event()
def rejected_called(arg):
self.session['called'] = True
self.assertIs(self.sentinel, arg)
event.set()
def fulfilled_called(arg):
self.assertFalse(self.session)
# Create a promise and resolve it
promise = self.scheduler.promise()
promise.then(fulfilled_called, rejected_called)
promise.rejected(self.sentinel)
self.assertTrue(event.wait(1.0))
self.assertTrue(self.session['called'])
def test_eventually_rejected(self):
# Reject later from a scheduled task on a different thread.
self.session['called'] = False
event = threading.Event()
def rejected_called(arg):
self.session['called'] = True
self.assertIs(self.sentinel, arg)
event.set()
def fulfilled_called(arg):
self.assertFalse(self.session)
def deferred_method():
self.session['promise'].rejected(self.sentinel)
# Create a promise and store it off
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(fulfilled_called, rejected_called)
# Schedule it on a different thread.
self.scheduler.schedule_task(deferred_method, delay=0.1)
self.assertTrue(event.wait(1.0))
self.assertTrue(self.session['called'])
class Promise_2_2_3_2_TestCase(SleekTest):
"""
2.2.3.2: it must not be called before `promise` is rejected

Verifies that onRejected fires only after the rejection happens, and
never fires at all when the promise is never rejected.
"""
dummy = {'dummy': 'dummy'}
sentinel = {'sentinel': 'sentinel'}
def setUp(self):
# Register the promise plugin and start a stream with the scheduler.
from sleekpromises import register_sleek_promises
register_sleek_promises()
self.session = {}
self.stream_start(plugins=['sleekpromises_scheduler', ])
self.scheduler = self.xmpp['sleekpromises_scheduler']
def tearDown(self):
self.stream_close()
def test_rejected_after_a_delay(self):
self.session['afterResolve'] = False
event = threading.Event()
def rejected_call(arg):
# Must only run after deferred() has flipped the flag.
self.assertTrue(self.session['afterResolve'])
event.set()
def deferred():
promise.rejected(self.dummy)
self.session['afterResolve'] = True
# Create a promise and resolve it
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(None, rejected_call)
self.scheduler.schedule_task(deferred, delay=0.05)
event_wait = event.wait(1.0)
self.assertTrue(self.session['afterResolve'])
self.assertTrue(event_wait)
def test_never_rejected(self):
self.session['called'] = False
event = threading.Event()
def rejected_called(arg):
self.session['called'] = True
event.set()
promise = self.scheduler.promise()
promise.then(None, rejected_called)
# Short wait: the callback must never fire for a pending promise.
event_wait = event.wait(0.150)
self.assertFalse(self.session['called'])
self.assertFalse(event_wait)
class Promise_2_2_3_3_TestCase(SleekTest):
"""
2.2.3.3: it must not be called more than once.

(Section number corrected from "2.2.2.3", which is the onFulfilled
counterpart; this class exercises the onRejected rule, matching its name.)
Each test rejects a promise repeatedly and asserts the handler ran once.
"""
dummy = {'dummy': 'dummy'}
sentinel = {'sentinel': 'sentinel'}
def setUp(self):
# Register the promise plugin and start a stream with the scheduler.
from sleekpromises import register_sleek_promises
register_sleek_promises()
self.session = {}
self.stream_start(plugins=['sleekpromises_scheduler', ])
self.scheduler = self.xmpp['sleekpromises_scheduler']
def tearDown(self):
self.stream_close()
def test_already_rejected(self):
self.session['times_called'] = 0
event = threading.Event()
def rejected(arg):
self.session['times_called'] += 1
event.set()
promise = self.scheduler.promise()
promise.rejected(self.dummy)
promise.then(None, rejected)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_trying_to_reject_a_pending_promise_more_than_once_immediately(self):
self.session['times_called'] = 0
event = threading.Event()
def rejected(arg):
self.session['times_called'] += 1
event.set()
promise = self.scheduler.promise()
promise.then(None, rejected)
# Second rejection must be ignored.
promise.rejected(self.dummy)
promise.rejected(self.dummy)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_trying_to_reject_a_pending_promise_more_than_once_delayed(self):
self.session['times_called'] = 0
event = threading.Event()
def rejected(arg):
self.session['times_called'] += 1
event.set()
def deferred():
# Both rejections happen later, on the scheduler thread.
promise = self.session['promise']
promise.rejected(self.dummy)
promise.rejected(self.dummy)
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(None, rejected)
self.scheduler.schedule_task(deferred, delay=0.50)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_trying_to_reject_a_pending_promise_more_than_once_immediately_then_delayed(self):
self.session['times_called'] = 0
event = threading.Event()
def rejected(arg):
self.session['times_called'] += 1
event.set()
def deferred():
# Second rejection arrives later, after the immediate one below.
promise = self.session['promise']
promise.rejected(self.dummy)
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(None, rejected)
promise.rejected(self.dummy)
self.scheduler.schedule_task(deferred, delay=0.50)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual(1, self.session['times_called'])
def test_when_multiple_then_calls_are_made_spaced_apart_in_time(self):
# Three handlers attached at different times must each run exactly once.
self.session['times_called'] = [0, 0, 0]
event = threading.Event()
def rejected_0(arg):
self.session['times_called'][0] += 1
def rejected_1(arg):
self.session['times_called'][1] += 1
def rejected_2(arg):
self.session['times_called'][2] += 1
event.set()
def reject_function():
promise = self.session['promise']
promise.rejected(self.dummy)
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(None, rejected_0)
self.scheduler.schedule_task(lambda: promise.then(None, rejected_1), delay=0.05)
self.scheduler.schedule_task(lambda: promise.then(None, rejected_2), delay=0.10)
self.scheduler.schedule_task(reject_function, delay=0.50)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual([1, 1, 1], self.session['times_called'])
def test_when_then_is_interleaved_with_fulfillment(self):
# A handler attached after rejection must still run exactly once.
self.session['times_called'] = [0, 0]
event = threading.Event()
def rejected_0(arg):
self.session['times_called'][0] += 1
def rejected_1(arg):
self.session['times_called'][1] += 1
event.set()
promise = self.scheduler.promise()
self.session['promise'] = promise
promise.then(None, rejected_0)
promise.rejected(self.dummy)
promise.then(None, rejected_1)
event_set = event.wait(1.0)
self.assertTrue(event_set)
self.assertEqual([1, 1], self.session['times_called'])
| 26.926471
| 116
| 0.61988
| 1,058
| 9,155
| 5.198488
| 0.109641
| 0.106
| 0.061091
| 0.084
| 0.853818
| 0.796909
| 0.779273
| 0.747636
| 0.714
| 0.685636
| 0
| 0.017624
| 0.26248
| 9,155
| 339
| 117
| 27.0059
| 0.796949
| 0.054943
| 0
| 0.813725
| 0
| 0
| 0.077254
| 0.016056
| 0
| 0
| 0
| 0
| 0.142157
| 1
| 0.191176
| false
| 0
| 0.02451
| 0
| 0.259804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5650de3b099aa22cf5789c4adfe4fe941453897
| 5,107
|
py
|
Python
|
flaskerize/attach_test.py
|
darkguinito/myflaskerize
|
e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-29T13:00:48.000Z
|
2020-11-29T13:00:48.000Z
|
flaskerize/attach_test.py
|
darkguinito/myflaskerize
|
e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b
|
[
"BSD-3-Clause"
] | null | null | null |
flaskerize/attach_test.py
|
darkguinito/myflaskerize
|
e76e3e4b6c91e2859b974aabf82e0ea5539bcf1b
|
[
"BSD-3-Clause"
] | null | null | null |
from os import path
import pytest
from unittest.mock import MagicMock
from dataclasses import dataclass
from flaskerize.attach import attach
def test_flaskerize_generate():
    """Smoke-test the `fz bundle` CLI in dry-run mode (exit status 0)."""
    import os
    exit_status = os.system("fz bundle --dry-run --from test/build/ --to app:create_app")
    assert exit_status == 0
def test_flaskerize_attach_from_cli(tmp_path):
# End-to-end CLI check: write a minimal Flask app and a blueprint file,
# run `fz attach --dry-run`, and verify it exits cleanly without writing.
# NOTE(review): the embedded source strings lost their original internal
# indentation in this copy — confirm against the upstream file.
import os
CONTENTS = """import os
from flask import Flask
def create_app():
app = Flask(__name__)
@app.route("/health")
def serve():
return "{{ name }} online!"
return app
if __name__ == "__main__":
app = create_app()
app.run()"""
app_file = path.join(tmp_path, "app.py")
with open(app_file, "w") as fid:
fid.write(CONTENTS)
BP_CONTENTS = """import os
from flask import Blueprint, send_from_directory
site = Blueprint('site', __name__, static_folder='test/build/')
# Serve static site
@site.route('/')
def index():
return send_from_directory(site.static_folder, 'index.html')"""
bp_name = path.join(tmp_path, "_fz_bp.py")
with open(bp_name, "w") as fid:
fid.write(BP_CONTENTS)
status = os.system(f"fz attach --dry-run --to {app_file} {bp_name}")
assert status == 0
# Dry-run must not create any output file.
assert not os.path.isfile("should_not_create.py")
def test_attach_with_no_dry_run(tmp_path):
# attach() with dry_run=False should modify the target app file in place;
# here we only assert the target still exists afterwards.
CONTENTS = """import os
from flask import Flask
def create_app():
app = Flask(__name__)
@app.route("/health")
def serve():
return "{{ name }} online!"
return app
if __name__ == "__main__":
app = create_app()
app.run()"""
app_file = path.join(tmp_path, "app.py")
with open(app_file, "w") as fid:
fid.write(CONTENTS)
# Minimal stand-in for the parsed CLI arguments attach() expects.
@dataclass
class Args:
to: str = path.join(tmp_path, app_file)
bp: str = path.join(tmp_path, "_fz_bp.py")
dry_run: bool = False
attach(Args())
assert path.isfile(path.join(tmp_path, app_file))
def test_attach_with_dry_run(tmp_path):
# attach() with dry_run=True must complete without raising; no assertions
# beyond successful execution.
CONTENTS = """import os
from flask import Flask
def create_app():
app = Flask(__name__)
@app.route("/health")
def serve():
return "{{ name }} online!"
return app
if __name__ == "__main__":
app = create_app()
app.run()"""
app_file = path.join(tmp_path, "app.py")
with open(app_file, "w") as fid:
fid.write(CONTENTS)
# Minimal stand-in for the parsed CLI arguments attach() expects.
@dataclass
class Args:
to: str = app_file
bp: str = "_fz_bp.py"
dry_run: bool = True
attach(Args())
def test_attach_without_dry_run_raises_if_file_does_not_exist(tmp_path):
# NOTE(review): despite the name, this test asserts nothing about an
# exception (no pytest.raises) — it only runs attach() with a mocked
# split_file_factory. Confirm intent against the upstream repository.
from os import path
from flaskerize import attach
CONTENTS = """import os
from flask import Flask
# a comment
def create_app():
app = Flask(__name__)
@app.route("/health")
def serve():
return "{{ name }} online!"
return app
if __name__ == "__main__":
app = create_app()
app.run()"""
app_file = path.join(tmp_path, "app.py")
with open(app_file, "w") as fid:
fid.write(CONTENTS)
@dataclass
class Args:
to: str = app_file
bp: str = "_fz_bp.py"
dry_run: bool = False
_ = path.join(tmp_path, "outfile.py")
# Bypass file splitting so attach() operates on our temp app file.
attach.split_file_factory = MagicMock(return_value=(app_file, "create_app"))
attach.attach(Args())
def test_attach_raises_with_no_target_function_call(tmp_path):
# The app factory is misnamed, so attach() cannot find `create_app` and
# must raise SyntaxError.
from os import path
from flaskerize import attach
CONTENTS = """import os
from flask import Flask
def misnamed_create_app():
app = Flask(__name__)
@app.route("/health")
def serve():
return "{{ name }} online!"
return app
if __name__ == "__main__":
app = create_app()
app.run()"""
app_file = path.join(tmp_path, "app.py")
with open(app_file, "w") as fid:
fid.write(CONTENTS)
@dataclass
class Args:
to: str = app_file
bp: str = "_fz_bp.py"
dry_run: bool = False
_ = path.join(tmp_path, "outfile.py")
# Bypass file splitting so attach() operates on our temp app file.
attach.split_file_factory = MagicMock(return_value=(app_file, "create_app"))
with pytest.raises(SyntaxError):
attach.attach(Args())
def test_attach_raises_with_no_Flask_call(tmp_path):
    """attach() must raise SyntaxError when the app never instantiates Flask."""
    from os import path
    from flaskerize import attach

    app_source = """import os
from flask import Flask
def create_app():
    @app.route("/health")
    def serve():
        return "{{ name }} online!"
    return app
if __name__ == "__main__":
    app = create_app()
    app.run()"""
    target = path.join(tmp_path, "app.py")
    with open(target, "w") as handle:
        handle.write(app_source)

    @dataclass
    class Args:
        to: str = target
        bp: str = "_fz_bp.py"
        dry_run: bool = False

    _unused = path.join(tmp_path, "outfile.py")
    attach.split_file_factory = MagicMock(return_value=(target, "create_app"))
    with pytest.raises(SyntaxError):
        attach.attach(Args())
| 22.697778
| 84
| 0.600548
| 672
| 5,107
| 4.251488
| 0.123512
| 0.053903
| 0.050053
| 0.068253
| 0.761288
| 0.747287
| 0.721036
| 0.701085
| 0.701085
| 0.677984
| 0
| 0.000542
| 0.277854
| 5,107
| 224
| 85
| 22.799107
| 0.774132
| 0
| 0
| 0.810127
| 1
| 0
| 0.433131
| 0.042099
| 0
| 0
| 0
| 0
| 0.025316
| 1
| 0.044304
| false
| 0
| 0.170886
| 0
| 0.424051
| 0.012658
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d59bcb47a8ad0d4bde990a4d6bb632083fee55e7
| 153
|
py
|
Python
|
python/8Kyu/Can we divide it.py
|
athasv/Codewars-data
|
5e106466e709fd776f23585ad9f652d0d65b48d3
|
[
"MIT"
] | null | null | null |
python/8Kyu/Can we divide it.py
|
athasv/Codewars-data
|
5e106466e709fd776f23585ad9f652d0d65b48d3
|
[
"MIT"
] | null | null | null |
python/8Kyu/Can we divide it.py
|
athasv/Codewars-data
|
5e106466e709fd776f23585ad9f652d0d65b48d3
|
[
"MIT"
] | null | null | null |
def is_divide_by(number, a, b):
    """Return True if *number* is evenly divisible by both *a* and *b*.

    Plain integer ``%`` is sign-safe for divisibility (``n % d == 0`` holds
    regardless of operand signs), so the previous per-call numpy import and
    the ``abs()`` round-trips were unnecessary; the explicit
    ``True if ... else False`` wrapper is likewise redundant around a
    boolean expression.
    """
    return number % a == 0 and number % b == 0
| 51
| 98
| 0.640523
| 31
| 153
| 3.096774
| 0.612903
| 0.208333
| 0.229167
| 0.270833
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0.20915
| 153
| 3
| 98
| 51
| 0.77686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6341128ac4fddea62cb53fcbdddef710281dbbf0
| 2,300
|
py
|
Python
|
tests/run/all_char_arrays_fwrap_doctest.py
|
wilsonify/fwrap
|
f2e20eb55eaa3de72905e2ef28198da00eebe262
|
[
"BSD-3-Clause"
] | 23
|
2015-02-25T00:24:15.000Z
|
2021-09-08T01:35:45.000Z
|
tests/run/all_char_arrays_fwrap_doctest.py
|
fwrap/fwrap
|
61a56f2d0050096b4973d88e5f11cfac2ef01a4b
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T01:45:02.000Z
|
2021-09-08T01:45:02.000Z
|
tests/run/all_char_arrays_fwrap_doctest.py
|
fwrap/fwrap
|
61a56f2d0050096b4973d88e5f11cfac2ef01a4b
|
[
"BSD-3-Clause"
] | 4
|
2015-03-22T01:33:39.000Z
|
2021-09-09T15:25:44.000Z
|
# Doctest driver for fwrap-generated character-array wrappers.
# NOTE(review): written for Python 2 (see the bare print statements below).
from all_char_arrays_fwrap import *
import numpy as np

# ll: fixed string width; n1 x n2: array shape.
ll, n1, n2 = 6, 3, 4
# Fortran-ordered arrays of fixed-width byte strings ('S6').
ain = np.empty((n1, n2), dtype='S%d' % ll, order='F')
aout = ain.copy('F')
ainout = ain.copy('F')
ano = ain.copy('F')
# The trailing-underscore twins hold the expected values that the wrapped
# routines should produce (filled in by init()).
aout_ = aout.copy('F')
ainout_ = ainout.copy('F')
ano_ = ano.copy('F')
def init(ain, aout, ainout, ano, aout_, ainout_, ano_):
    """(Re)initialize the input arrays and their expected-output twins."""
    # Inputs handed to the wrapped Fortran routines.
    for array, value in ((ain, 'ABCDEF'), (aout, ' '),
                         (ainout, '123456'), (ano, ' ')):
        array.fill(value)
    # Values the routines are expected to hand back.
    aout_[...] = ain
    ano_[...] = ainout
    ainout_.fill(ain[0, 0][:3] + ano_[0, 0][3:])
def test_results(func, args, results):
res_ = func(*args)
for r1, r2 in zip(res_, results):
if not np.all(r1 == r2):
print r1
print r2
return False
return True
# Module-level doctest: run each wrapper flavour (assumed shape, explicit
# shape, assumed size) and check the results via test_results(); calls with
# mismatched lengths/shapes must raise RuntimeError from the wrapper layer.
__doc__ = u'''
>>> init(ain, aout, ainout, ano, aout_, ainout_, ano_)
>>> test_results(assumed_shape, (ain, aout, ainout, ano), (aout_, ainout_, ano_))
True
>>> init(ain, aout, ainout, ano, aout_, ainout_, ano_)
>>> test_results(explicit_shape, (ll, n1, n2, ain, aout, ainout, ano), (aout_, ainout_, ano_))
True
>>> init(ain, aout, ainout, ano, aout_, ainout_, ano_)
>>> test_results(assumed_size, (n1, n2, ain, aout, ainout, ano), (aout_, ainout_, ano_))
True
>>> init(ain, aout, ainout, ano, aout_, ainout_, ano_)
>>> test_results(assumed_size, (n1+1, n2, ain, aout, ainout, ano), (aout_, ainout_, ano_))
Traceback (most recent call last):
...
RuntimeError: an error was encountered when calling the 'assumed_size' wrapper.
>>> init(ain, aout, ainout, ano, aout_, ainout_, ano_)
>>> test_results(explicit_shape, (ll+1, n1, n2, ain, aout, ainout, ano), (aout_, ainout_, ano_))
Traceback (most recent call last):
...
RuntimeError: an error was encountered when calling the 'explicit_shape' wrapper.
>>> init(ain, aout, ainout, ano, aout_, ainout_, ano_)
>>> test_results(explicit_shape, (ll, n1+1, n2, ain, aout, ainout, ano), (aout_, ainout_, ano_))
Traceback (most recent call last):
...
RuntimeError: an error was encountered when calling the 'explicit_shape' wrapper.
>>> init(ain, aout, ainout, ano, aout_, ainout_, ano_)
>>> test_results(explicit_shape, (ll, n1, n2+1, ain, aout, ainout, ano), (aout_, ainout_, ano_))
Traceback (most recent call last):
...
RuntimeError: an error was encountered when calling the 'explicit_shape' wrapper.
'''
| 32.857143
| 96
| 0.648261
| 336
| 2,300
| 4.199405
| 0.205357
| 0.212615
| 0.2764
| 0.170092
| 0.732814
| 0.732814
| 0.732814
| 0.732814
| 0.709426
| 0.708009
| 0
| 0.021774
| 0.181304
| 2,300
| 69
| 97
| 33.333333
| 0.727562
| 0
| 0
| 0.368421
| 0
| 0.122807
| 0.69
| 0.083043
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.035088
| null | null | 0.035088
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
896cfb202d5aed84f4c7210eb7bd79fb1acf32e6
| 977
|
py
|
Python
|
jazzy/functions/LogicalOpFunc.py
|
joewashear007/jazzy
|
f646de7b2e54040abc91e7b737675d9f565c621b
|
[
"MIT"
] | null | null | null |
jazzy/functions/LogicalOpFunc.py
|
joewashear007/jazzy
|
f646de7b2e54040abc91e7b737675d9f565c621b
|
[
"MIT"
] | 8
|
2015-02-17T15:10:22.000Z
|
2015-03-03T04:12:43.000Z
|
jazzy/functions/LogicalOpFunc.py
|
joewashear007/jazzy
|
f646de7b2e54040abc91e7b737675d9f565c621b
|
[
"MIT"
] | null | null | null |
# Public API of this module: the three logical/bitwise operator classes.
__all__ = ['jazAND', 'jazNOT', 'jazOR']
class jazAND:
    """'&' operator: pop two values from the interpreter's stack and push
    their bitwise AND (operands coerced to int)."""

    def __init__(self):
        self.command = "&"  # token this operator handles

    def call(self, interpreter, arg):
        """Pop two operands, push ``int(top) & int(next)``; returns None."""
        # Both pops and the push target the same scope's stack (the code
        # only works if GetScope() is stable), so fetch it once.
        stack = interpreter.GetScope().stack
        top_value1 = stack.pop()
        top_value2 = stack.pop()
        stack.append(int(top_value1) & int(top_value2))
        return None
class jazNOT:
    """'!' operator: pop one value from the interpreter's stack and push
    its logical negation as 0/1."""

    def __init__(self):
        self.command = "!"  # token this operator handles

    def call(self, interpreter, arg):
        """Pop one operand, push ``int(not value)``; returns None."""
        # Pop and push target the same scope's stack, so fetch it once.
        stack = interpreter.GetScope().stack
        top_value = stack.pop()
        stack.append(int(not top_value))
        return None
class jazOR:
    """'|' operator: pop two values from the interpreter's stack and push
    their bitwise OR (operands coerced to int)."""

    def __init__(self):
        self.command = "|"  # token this operator handles

    def call(self, interpreter, arg):
        """Pop two operands, push ``int(top) | int(next)``; returns None."""
        # Both pops and the push target the same scope's stack, so fetch it once.
        stack = interpreter.GetScope().stack
        top_value1 = stack.pop()
        top_value2 = stack.pop()
        stack.append(int(top_value1) | int(top_value2))
        return None
# Name -> operator-class lookup table; presumably used by the interpreter's
# dispatcher to resolve operator tokens -- confirm against the caller.
Functions = {'jazAND': jazAND, 'jazNOT': jazNOT, 'jazOR': jazOR}
| 29.606061
| 77
| 0.627431
| 101
| 977
| 5.910891
| 0.237624
| 0.254606
| 0.321608
| 0.226131
| 0.792295
| 0.792295
| 0.792295
| 0.792295
| 0.792295
| 0.691792
| 0
| 0.010582
| 0.226203
| 977
| 32
| 78
| 30.53125
| 0.779101
| 0
| 0
| 0.52
| 0
| 0
| 0.037871
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.24
| false
| 0
| 0
| 0
| 0.48
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
98634c6770aa3b5b42d46840e3da2f1079884d37
| 35,678
|
py
|
Python
|
tests/test_elbv2/test_elbv2.py
|
edeustace/moto
|
43aa6ca7561173b22d6bc5ce051bebf5ca1a3c17
|
[
"Apache-2.0"
] | null | null | null |
tests/test_elbv2/test_elbv2.py
|
edeustace/moto
|
43aa6ca7561173b22d6bc5ce051bebf5ca1a3c17
|
[
"Apache-2.0"
] | 1
|
2021-12-13T20:51:54.000Z
|
2021-12-13T20:51:54.000Z
|
tests/test_elbv2/test_elbv2.py
|
edeustace/moto
|
43aa6ca7561173b22d6bc5ce051bebf5ca1a3c17
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import boto3
import botocore
from botocore.exceptions import ClientError
from nose.tools import assert_raises
import sure # noqa
from moto import mock_elbv2, mock_ec2
@mock_elbv2
@mock_ec2
def test_create_load_balancer():
    """Creating an internal ALB should echo back its DNS name, ARN,
    security groups, availability zones, and tags."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    lb = response.get('LoadBalancers')[0]
    lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com")
    lb.get('LoadBalancerArn').should.equal(
        'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188')
    lb.get('SecurityGroups').should.equal([security_group.id])
    lb.get('AvailabilityZones').should.equal([
        {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'},
        {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}])

    # Ensure the tags persisted
    response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')])
    tags = {d['Key']: d['Value']
            for d in response['TagDescriptions'][0]['Tags']}
    tags.should.equal({'key_name': 'a_value'})
@mock_elbv2
@mock_ec2
def test_describe_load_balancers():
    """describe_load_balancers should filter by ARN and by name, and raise
    ClientError for unknown ARNs or names."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    # Unfiltered describe returns the single balancer.
    response = conn.describe_load_balancers()
    response.get('LoadBalancers').should.have.length_of(1)
    lb = response.get('LoadBalancers')[0]
    lb.get('LoadBalancerName').should.equal('my-lb')

    # Lookup by ARN.
    response = conn.describe_load_balancers(
        LoadBalancerArns=[lb.get('LoadBalancerArn')])
    response.get('LoadBalancers')[0].get(
        'LoadBalancerName').should.equal('my-lb')

    # Lookup by name.
    response = conn.describe_load_balancers(Names=['my-lb'])
    response.get('LoadBalancers')[0].get(
        'LoadBalancerName').should.equal('my-lb')

    # Unknown ARN / name must raise.
    with assert_raises(ClientError):
        conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn'])
    with assert_raises(ClientError):
        conn.describe_load_balancers(Names=['nope'])
@mock_elbv2
@mock_ec2
def test_add_remove_tags():
    """Tag lifecycle on a load balancer: add, overwrite, enforce the tag
    limit, and remove a single key."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    lbs = conn.describe_load_balancers()['LoadBalancers']
    lbs.should.have.length_of(1)
    lb = lbs[0]

    # Tagging a nonexistent resource fails.
    with assert_raises(ClientError):
        conn.add_tags(ResourceArns=['missing-arn'],
                      Tags=[{
                          'Key': 'a',
                          'Value': 'b'
                      }])

    conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')],
                  Tags=[{
                      'Key': 'a',
                      'Value': 'b'
                  }])

    tags = {d['Key']: d['Value'] for d in conn.describe_tags(
        ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']}
    tags.should.have.key('a').which.should.equal('b')

    # Fill the resource up to ten tags (keys a..h plus j).
    conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')],
                  Tags=[{
                      'Key': 'a',
                      'Value': 'b'
                  }, {
                      'Key': 'b',
                      'Value': 'b'
                  }, {
                      'Key': 'c',
                      'Value': 'b'
                  }, {
                      'Key': 'd',
                      'Value': 'b'
                  }, {
                      'Key': 'e',
                      'Value': 'b'
                  }, {
                      'Key': 'f',
                      'Value': 'b'
                  }, {
                      'Key': 'g',
                      'Value': 'b'
                  }, {
                      'Key': 'h',
                      'Value': 'b'
                  }, {
                      'Key': 'j',
                      'Value': 'b'
                  }])

    # An eleventh distinct key must be rejected.
    conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')],
                                   Tags=[{
                                       'Key': 'k',
                                       'Value': 'b'
                                   }]).should.throw(botocore.exceptions.ClientError)

    # Re-adding an existing key overwrites its value instead of failing.
    conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')],
                  Tags=[{
                      'Key': 'j',
                      'Value': 'c'
                  }])

    tags = {d['Key']: d['Value'] for d in conn.describe_tags(
        ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']}

    tags.should.have.key('a').which.should.equal('b')
    tags.should.have.key('b').which.should.equal('b')
    tags.should.have.key('c').which.should.equal('b')
    tags.should.have.key('d').which.should.equal('b')
    tags.should.have.key('e').which.should.equal('b')
    tags.should.have.key('f').which.should.equal('b')
    tags.should.have.key('g').which.should.equal('b')
    tags.should.have.key('h').which.should.equal('b')
    tags.should.have.key('j').which.should.equal('c')
    tags.shouldnt.have.key('k')

    # Removing a key deletes only that key.
    conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')],
                     TagKeys=['a'])

    tags = {d['Key']: d['Value'] for d in conn.describe_tags(
        ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']}

    tags.shouldnt.have.key('a')
    tags.should.have.key('b').which.should.equal('b')
    tags.should.have.key('c').which.should.equal('b')
    tags.should.have.key('d').which.should.equal('b')
    tags.should.have.key('e').which.should.equal('b')
    tags.should.have.key('f').which.should.equal('b')
    tags.should.have.key('g').which.should.equal('b')
    tags.should.have.key('h').which.should.equal('b')
    tags.should.have.key('j').which.should.equal('c')
@mock_elbv2
@mock_ec2
def test_create_elb_in_multiple_region():
    """Load balancers created in different regions are isolated per region."""
    for region in ['us-west-1', 'us-west-2']:
        conn = boto3.client('elbv2', region_name=region)
        ec2 = boto3.resource('ec2', region_name=region)

        security_group = ec2.create_security_group(
            GroupName='a-security-group', Description='First One')
        vpc = ec2.create_vpc(
            CidrBlock='172.28.7.0/24',
            InstanceTenancy='default')
        subnet1 = ec2.create_subnet(
            VpcId=vpc.id,
            CidrBlock='172.28.7.192/26',
            AvailabilityZone=region + 'a')
        subnet2 = ec2.create_subnet(
            VpcId=vpc.id,
            CidrBlock='172.28.7.192/26',
            AvailabilityZone=region + 'b')

        conn.create_load_balancer(
            Name='my-lb',
            Subnets=[subnet1.id, subnet2.id],
            SecurityGroups=[security_group.id],
            Scheme='internal',
            Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    # Each region sees exactly its own balancer.
    list(
        boto3.client(
            'elbv2',
            region_name='us-west-1').describe_load_balancers().get('LoadBalancers')
    ).should.have.length_of(1)
    list(
        boto3.client(
            'elbv2',
            region_name='us-west-2').describe_load_balancers().get('LoadBalancers')
    ).should.have.length_of(1)
@mock_elbv2
@mock_ec2
def test_create_target_group_and_listeners():
    """Full lifecycle: create a target group, attach HTTP and HTTPS
    listeners, then verify the delete ordering constraints (a target group
    in use by a listener cannot be deleted; deleting the LB removes its
    remaining listeners but not the target groups)."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')

    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]
    target_group_arn = target_group['TargetGroupArn']

    # Add tags to the target group
    conn.add_tags(ResourceArns=[target_group_arn], Tags=[
        {'Key': 'target', 'Value': 'group'}])
    conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal(
        [{'Key': 'target', 'Value': 'group'}])

    # Check it's in the describe_target_groups response
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(1)

    # Plain HTTP listener
    response = conn.create_listener(
        LoadBalancerArn=load_balancer_arn,
        Protocol='HTTP',
        Port=80,
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(80)
    listener.get('Protocol').should.equal('HTTP')
    listener.get('DefaultActions').should.equal([{
        'TargetGroupArn': target_group.get('TargetGroupArn'),
        'Type': 'forward'}])
    http_listener_arn = listener.get('ListenerArn')

    # And another with SSL
    response = conn.create_listener(
        LoadBalancerArn=load_balancer_arn,
        Protocol='HTTPS',
        Port=443,
        Certificates=[
            {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}],
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(443)
    listener.get('Protocol').should.equal('HTTPS')
    listener.get('Certificates').should.equal([{
        'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert',
    }])
    listener.get('DefaultActions').should.equal([{
        'TargetGroupArn': target_group.get('TargetGroupArn'),
        'Type': 'forward'}])
    https_listener_arn = listener.get('ListenerArn')

    # Listeners can be fetched for the whole LB or individually by ARN.
    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
    response.get('Listeners').should.have.length_of(2)
    response = conn.describe_listeners(ListenerArns=[https_listener_arn])
    response.get('Listeners').should.have.length_of(1)
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(443)
    listener.get('Protocol').should.equal('HTTPS')
    response = conn.describe_listeners(
        ListenerArns=[
            http_listener_arn,
            https_listener_arn])
    response.get('Listeners').should.have.length_of(2)

    # Try to delete the target group and it fails because there's a
    # listener referencing it
    with assert_raises(ClientError) as e:
        conn.delete_target_group(
            TargetGroupArn=target_group.get('TargetGroupArn'))
    e.exception.operation_name.should.equal('DeleteTargetGroup')
    e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", ))  # NOQA

    # Delete one listener
    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
    response.get('Listeners').should.have.length_of(2)
    conn.delete_listener(ListenerArn=http_listener_arn)
    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
    response.get('Listeners').should.have.length_of(1)

    # Then delete the load balancer
    conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn)

    # It's gone
    response = conn.describe_load_balancers()
    response.get('LoadBalancers').should.have.length_of(0)

    # And it deleted the remaining listener
    response = conn.describe_listeners(
        ListenerArns=[
            http_listener_arn,
            https_listener_arn])
    response.get('Listeners').should.have.length_of(0)

    # But not the target groups
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(1)

    # Which we'll now delete
    conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn'))
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(0)
@mock_elbv2
@mock_ec2
def test_create_invalid_target_group():
    """Target-group name validation: over-long names and names with
    leading/trailing/double hyphens or invalid characters are rejected;
    simple alphanumeric names are accepted."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')

    # Fail to create target group with name which length is 33
    long_name = 'A' * 33
    with assert_raises(ClientError):
        conn.create_target_group(
            Name=long_name,
            Protocol='HTTP',
            Port=8080,
            VpcId=vpc.id,
            HealthCheckProtocol='HTTP',
            HealthCheckPort='8080',
            HealthCheckPath='/',
            HealthCheckIntervalSeconds=5,
            HealthCheckTimeoutSeconds=5,
            HealthyThresholdCount=5,
            UnhealthyThresholdCount=2,
            Matcher={'HttpCode': '200'})

    # Names violating the allowed character/hyphen rules must raise.
    invalid_names = [
        '-name',
        'name-',
        '-name-',
        'example.com',
        'test@test',
        'Na--me']
    for name in invalid_names:
        with assert_raises(ClientError):
            conn.create_target_group(
                Name=name,
                Protocol='HTTP',
                Port=8080,
                VpcId=vpc.id,
                HealthCheckProtocol='HTTP',
                HealthCheckPort='8080',
                HealthCheckPath='/',
                HealthCheckIntervalSeconds=5,
                HealthCheckTimeoutSeconds=5,
                HealthyThresholdCount=5,
                UnhealthyThresholdCount=2,
                Matcher={'HttpCode': '200'})

    # Conforming names must succeed.
    valid_names = ['name', 'Name', '000']
    for name in valid_names:
        conn.create_target_group(
            Name=name,
            Protocol='HTTP',
            Port=8080,
            VpcId=vpc.id,
            HealthCheckProtocol='HTTP',
            HealthCheckPort='8080',
            HealthCheckPath='/',
            HealthCheckIntervalSeconds=5,
            HealthCheckTimeoutSeconds=5,
            HealthyThresholdCount=5,
            UnhealthyThresholdCount=2,
            Matcher={'HttpCode': '200'})
@mock_elbv2
@mock_ec2
def test_describe_paginated_balancers():
    """With 51 balancers, the first describe page holds 50 plus a
    NextMarker, and the marker-following page holds the remaining one."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    # One more balancer than the 50-per-page limit.
    for i in range(51):
        conn.create_load_balancer(
            Name='my-lb%d' % i,
            Subnets=[subnet1.id, subnet2.id],
            SecurityGroups=[security_group.id],
            Scheme='internal',
            Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    resp = conn.describe_load_balancers()
    resp['LoadBalancers'].should.have.length_of(50)
    # The marker is the name of the last balancer on the page.
    resp['NextMarker'].should.equal(
        resp['LoadBalancers'][-1]['LoadBalancerName'])
    resp2 = conn.describe_load_balancers(Marker=resp['NextMarker'])
    resp2['LoadBalancers'].should.have.length_of(1)
    assert 'NextToken' not in resp2.keys()
@mock_elbv2
@mock_ec2
def test_delete_load_balancer():
    """delete_load_balancer should remove the balancer from describe results."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    response.get('LoadBalancers').should.have.length_of(1)
    lb = response.get('LoadBalancers')[0]

    conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn'))
    balancers = conn.describe_load_balancers().get('LoadBalancers')
    balancers.should.have.length_of(0)
@mock_ec2
@mock_elbv2
def test_register_targets():
    """Register two instances with a target group, then deregister one and
    verify the target-health counts at each step."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]

    # No targets registered yet
    response = conn.describe_target_health(
        TargetGroupArn=target_group.get('TargetGroupArn'))
    response.get('TargetHealthDescriptions').should.have.length_of(0)

    response = ec2.create_instances(
        ImageId='ami-1234abcd', MinCount=2, MaxCount=2)
    instance_id1 = response[0].id
    instance_id2 = response[1].id

    response = conn.register_targets(
        TargetGroupArn=target_group.get('TargetGroupArn'),
        Targets=[
            {
                'Id': instance_id1,
                'Port': 5060,
            },
            {
                'Id': instance_id2,
                'Port': 4030,
            },
        ])

    # Both instances now report health entries.
    response = conn.describe_target_health(
        TargetGroupArn=target_group.get('TargetGroupArn'))
    response.get('TargetHealthDescriptions').should.have.length_of(2)

    response = conn.deregister_targets(
        TargetGroupArn=target_group.get('TargetGroupArn'),
        Targets=[{'Id': instance_id2}])

    # Only the remaining registered instance is reported.
    response = conn.describe_target_health(
        TargetGroupArn=target_group.get('TargetGroupArn'))
    response.get('TargetHealthDescriptions').should.have.length_of(1)
@mock_ec2
@mock_elbv2
def test_target_group_attributes():
    """Target-group attributes: defaults are reported, modify returns only
    the updated keys, and describe merges old and new attributes."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs.
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]

    # Check it's in the describe_target_groups response
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(1)
    target_group_arn = target_group['TargetGroupArn']

    # check if Names filter works
    # NOTE(review): the Names=[] response is discarded and target_group_arn
    # is re-assigned to the same value below -- likely leftover lines.
    response = conn.describe_target_groups(Names=[])
    response = conn.describe_target_groups(Names=['a-target'])
    response.get('TargetGroups').should.have.length_of(1)
    target_group_arn = target_group['TargetGroupArn']

    # The attributes should start with the two defaults
    response = conn.describe_target_group_attributes(
        TargetGroupArn=target_group_arn)
    response['Attributes'].should.have.length_of(2)
    attributes = {attr['Key']: attr['Value']
                  for attr in response['Attributes']}
    attributes['deregistration_delay.timeout_seconds'].should.equal('300')
    attributes['stickiness.enabled'].should.equal('false')

    # Add cookie stickiness
    response = conn.modify_target_group_attributes(
        TargetGroupArn=target_group_arn,
        Attributes=[
            {
                'Key': 'stickiness.enabled',
                'Value': 'true',
            },
            {
                'Key': 'stickiness.type',
                'Value': 'lb_cookie',
            },
        ])

    # The response should have only the keys updated
    response['Attributes'].should.have.length_of(2)
    attributes = {attr['Key']: attr['Value']
                  for attr in response['Attributes']}
    attributes['stickiness.type'].should.equal('lb_cookie')
    attributes['stickiness.enabled'].should.equal('true')

    # These new values should be in the full attribute list
    response = conn.describe_target_group_attributes(
        TargetGroupArn=target_group_arn)
    response['Attributes'].should.have.length_of(3)
    attributes = {attr['Key']: attr['Value']
                  for attr in response['Attributes']}
    attributes['stickiness.type'].should.equal('lb_cookie')
    attributes['stickiness.enabled'].should.equal('true')
@mock_elbv2
@mock_ec2
def test_handle_listener_rules():
    """End-to-end exercise of ELBv2 listener rules.

    Covers create/describe/modify/delete of rules, priority ordering and
    reassignment, pagination of describe_rules, and the ClientError cases
    for invalid create/describe requests.
    """
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Network fixtures: one SG, one VPC, two subnets in different AZs
    # (a load balancer requires at least two availability zones).
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1b')

    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')

    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]

    # Plain HTTP listener
    response = conn.create_listener(
        LoadBalancerArn=load_balancer_arn,
        Protocol='HTTP',
        Port=80,
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(80)
    listener.get('Protocol').should.equal('HTTP')
    listener.get('DefaultActions').should.equal([{
        'TargetGroupArn': target_group.get('TargetGroupArn'),
        'Type': 'forward'}])
    http_listener_arn = listener.get('ListenerArn')

    # create first rule
    priority = 100
    host = 'xxx.example.com'
    path_pattern = 'foobar'
    created_rule = conn.create_rule(
        ListenerArn=http_listener_arn,
        Priority=priority,
        Conditions=[{
            'Field': 'host-header',
            'Values': [host]
        }, {
            'Field': 'path-pattern',
            'Values': [path_pattern]
        }],
        Actions=[{
            'TargetGroupArn': target_group.get('TargetGroupArn'),
            'Type': 'forward'
        }]
    )['Rules'][0]
    # Priority comes back as a string in the API response.
    created_rule['Priority'].should.equal('100')

    # check if rules is sorted by priority
    priority = 50
    host = 'yyy.example.com'
    path_pattern = 'foobar'
    rules = conn.create_rule(
        ListenerArn=http_listener_arn,
        Priority=priority,
        Conditions=[{
            'Field': 'host-header',
            'Values': [host]
        }, {
            'Field': 'path-pattern',
            'Values': [path_pattern]
        }],
        Actions=[{
            'TargetGroupArn': target_group.get('TargetGroupArn'),
            'Type': 'forward'
        }]
    )

    # test for PriorityInUse: re-using an existing priority must fail
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host]
            }, {
                'Field': 'path-pattern',
                'Values': [path_pattern]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )

    # test describe_rules: two explicit rules plus the listener's default rule
    obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn)
    len(obtained_rules['Rules']).should.equal(3)
    priorities = [rule['Priority'] for rule in obtained_rules['Rules']]
    priorities.should.equal(['50', '100', 'default'])

    first_rule = obtained_rules['Rules'][0]
    second_rule = obtained_rules['Rules'][1]
    obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']])
    obtained_rules['Rules'].should.equal([first_rule])

    # test for pagination
    obtained_rules = conn.describe_rules(
        ListenerArn=http_listener_arn, PageSize=1)
    len(obtained_rules['Rules']).should.equal(1)
    obtained_rules.should.have.key('NextMarker')
    next_marker = obtained_rules['NextMarker']

    following_rules = conn.describe_rules(
        ListenerArn=http_listener_arn,
        PageSize=1,
        Marker=next_marker)
    len(following_rules['Rules']).should.equal(1)
    following_rules.should.have.key('NextMarker')
    # Second page must contain a different rule than the first page.
    following_rules['Rules'][0]['RuleArn'].should_not.equal(
        obtained_rules['Rules'][0]['RuleArn'])

    # test for invalid describe rule request: neither, empty, or both
    # of ListenerArn/RuleArns are rejected
    with assert_raises(ClientError):
        conn.describe_rules()
    with assert_raises(ClientError):
        conn.describe_rules(RuleArns=[])
    with assert_raises(ClientError):
        conn.describe_rules(
            ListenerArn=http_listener_arn,
            RuleArns=[first_rule['RuleArn']]
        )

    # modify rule partially (Conditions only; Actions stay untouched)
    new_host = 'new.example.com'
    new_path_pattern = 'new_path'
    modified_rule = conn.modify_rule(
        RuleArn=first_rule['RuleArn'],
        Conditions=[{
            'Field': 'host-header',
            'Values': [new_host]
        }, {
            'Field': 'path-pattern',
            'Values': [new_path_pattern]
        }]
    )['Rules'][0]

    rules = conn.describe_rules(ListenerArn=http_listener_arn)
    obtained_rule = rules['Rules'][0]
    modified_rule.should.equal(obtained_rule)
    obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host)
    obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern)
    obtained_rule['Actions'][0]['TargetGroupArn'].should.equal(
        target_group.get('TargetGroupArn'))

    # modify priority
    conn.set_rule_priorities(
        RulePriorities=[
            {'RuleArn': first_rule['RuleArn'],
             'Priority': int(first_rule['Priority']) - 1}
        ]
    )

    # assigning the same priority to two rules in one request must fail
    with assert_raises(ClientError):
        conn.set_rule_priorities(
            RulePriorities=[
                {'RuleArn': first_rule['RuleArn'], 'Priority': 999},
                {'RuleArn': second_rule['RuleArn'], 'Priority': 999}
            ]
        )

    # delete
    arn = first_rule['RuleArn']
    conn.delete_rule(RuleArn=arn)
    rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules']
    len(rules).should.equal(2)

    # test for invalid action type
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host]
            }, {
                'Field': 'path-pattern',
                'Values': [path_pattern]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward2'
            }]
        )

    # test for invalid target group arn
    safe_priority = 2
    invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x'
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host]
            }, {
                'Field': 'path-pattern',
                'Values': [path_pattern]
            }],
            Actions=[{
                'TargetGroupArn': invalid_target_group_arn,
                'Type': 'forward'
            }]
        )

    # test for invalid condition field_name
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'xxxxxxx',
                'Values': [host]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )

    # test for empty condition value
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': []
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )

    # test for multiple condition value
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host, host]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )
@mock_elbv2
@mock_ec2
def test_describe_invalid_target_group():
    """describe_target_groups with an unknown name must raise ClientError."""
    elb = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    # Minimal network fixtures: one SG, one VPC, two subnets in distinct AZs.
    sg = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnets = [
        ec2.create_subnet(
            VpcId=vpc.id,
            CidrBlock='172.28.7.192/26',
            AvailabilityZone=zone)
        for zone in ('us-east-1a', 'us-east-1b')
    ]

    lb_response = elb.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet.id for subnet in subnets],
        SecurityGroups=[sg.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    lb_response.get('LoadBalancers')[0].get('LoadBalancerArn')

    elb.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})

    # Check error raises correctly
    with assert_raises(ClientError):
        elb.describe_target_groups(Names=['invalid'])
| 34.538238
| 270
| 0.602668
| 3,847
| 35,678
| 5.434364
| 0.080842
| 0.03157
| 0.022769
| 0.024395
| 0.821726
| 0.779537
| 0.755477
| 0.725773
| 0.708887
| 0.675117
| 0
| 0.033874
| 0.255311
| 35,678
| 1,032
| 271
| 34.571705
| 0.752983
| 0.032037
| 0
| 0.734411
| 0
| 0.002309
| 0.167126
| 0.011741
| 0
| 0
| 0
| 0
| 0.02194
| 1
| 0.013857
| false
| 0
| 0.008083
| 0
| 0.02194
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
98ae7f305ca79e753d33191678cec81f85e9b897
| 269
|
py
|
Python
|
JeVois_model_changer/jevois_model_change.py
|
amirhossein-p/Benchmarking_Cameras
|
11b20b5709b2321ee71a33647c5c1441dead28a3
|
[
"MIT"
] | null | null | null |
JeVois_model_changer/jevois_model_change.py
|
amirhossein-p/Benchmarking_Cameras
|
11b20b5709b2321ee71a33647c5c1441dead28a3
|
[
"MIT"
] | null | null | null |
JeVois_model_changer/jevois_model_change.py
|
amirhossein-p/Benchmarking_Cameras
|
11b20b5709b2321ee71a33647c5c1441dead28a3
|
[
"MIT"
] | null | null | null |
import sys
import os

# Directory on the JeVois SD card that holds the TensorFlow cat/dog models.
MODEL_DIR = '/media/amirhossein/JEVOIS/share/tensorflow/catdog'


def change_model(model_name, model_dir=MODEL_DIR):
    """Activate ``<model_name>.tflite`` by renaming it to ``model.tflite``.

    ``os.replace`` overwrites the destination atomically, so the separate
    ``rm`` the old ``os.system`` commands ran is unnecessary. Calling it
    directly (instead of building a shell string) also avoids shell
    injection through the user-supplied model name and surfaces failures
    as exceptions instead of ignored exit codes.

    Raises FileNotFoundError if ``<model_name>.tflite`` does not exist.
    """
    src = os.path.join(model_dir, model_name + '.tflite')
    dst = os.path.join(model_dir, 'model.tflite')
    os.replace(src, dst)


if __name__ == '__main__':
    # Preserve the original CLI: the single argument is the model name.
    if len(sys.argv) != 2:
        sys.exit('usage: jevois_model_change.py <model-name>')
    change_model(sys.argv[1])
| 38.428571
| 148
| 0.780669
| 38
| 269
| 5.526316
| 0.447368
| 0.228571
| 0.314286
| 0.385714
| 0.719048
| 0.719048
| 0.514286
| 0.514286
| 0
| 0
| 0
| 0.011905
| 0.063197
| 269
| 6
| 149
| 44.833333
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0.701493
| 0.649254
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
7f30da6e3569b6d4c3d37ab474653d1aa8bfd734
| 270
|
py
|
Python
|
gquant/plugin_nodes/__init__.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | 1
|
2021-07-09T14:49:08.000Z
|
2021-07-09T14:49:08.000Z
|
gquant/plugin_nodes/__init__.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | null | null | null |
gquant/plugin_nodes/__init__.py
|
philtrade/gQuant
|
08b2a82a257c234b92f097b925f25cab16fd0926
|
[
"Apache-2.0"
] | 1
|
2021-03-22T19:54:38.000Z
|
2021-03-22T19:54:38.000Z
|
from .dataloader import * # noqa: F403,F401
from .analysis import * # noqa: F403,F401
from .transform import * # noqa: F403,F401
from .backtest import * # noqa: F403,F401
from .strategy import * # noqa: F403,F401
from .portofolio import * # noqa: F403,F401
| 38.571429
| 44
| 0.685185
| 36
| 270
| 5.138889
| 0.305556
| 0.324324
| 0.454054
| 0.583784
| 0.594595
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167442
| 0.203704
| 270
| 6
| 45
| 45
| 0.693023
| 0.351852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7f66117e1d3fb1c916e1a4b2a9c713b1a9e7f2f2
| 11,073
|
py
|
Python
|
abides-markets/tests/orderbook/test_market_orders.py
|
jpmorganchase/ABIDES-jpmc-gym
|
198736a1b1316190072356c980412569579f15a6
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2021-09-23T13:17:26.000Z
|
2021-09-23T13:17:26.000Z
|
abides-markets/tests/orderbook/test_market_orders.py
|
jpmorganchase/ABIDES-gym
|
198736a1b1316190072356c980412569579f15a6
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
abides-markets/tests/orderbook/test_market_orders.py
|
jpmorganchase/ABIDES-gym
|
198736a1b1316190072356c980412569579f15a6
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
import pytest
from abides_markets.orders import MarketOrder, Side
from . import setup_book_with_orders, SYMBOL, TIME
# fmt: off
def test_handle_market_order_bid_1():
    """Test buy order that partially consumes one order"""
    book, agent, limit_orders = setup_book_with_orders(
        asks=[(100, [30])],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=10,
            side=Side.BID,
        )
    )

    # 10 of the resting 30 are consumed at price 100.
    assert book.get_l3_ask_data() == [(100, [20])]

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.ASK, 100, 10),
        (2, Side.BID, 100, 10),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_market_order_bid_2():
    """Test buy order that fully consumes one order"""
    book, agent, limit_orders = setup_book_with_orders(
        asks=[(100, [30])],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=30,
            side=Side.BID,
        )
    )

    # The single resting ask is fully consumed.
    assert book.get_l3_ask_data() == []

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.ASK, 100, 30),
        (2, Side.BID, 100, 30),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_market_order_bid_3():
    """Test buy order that consumes multiple orders"""
    book, agent, limit_orders = setup_book_with_orders(
        asks=[(100, [30, 40])],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=70,
            side=Side.BID,
        )
    )

    # Both resting asks at 100 are fully consumed.
    assert book.get_l3_ask_data() == []

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.ASK, 100, 30),
        (2, Side.BID, 100, 30),
        (1, Side.ASK, 100, 40),
        (2, Side.BID, 100, 40),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_market_order_bid_4():
    """Test buy order that consumes multiple orders at different prices"""
    book, agent, limit_orders = setup_book_with_orders(
        asks=[
            (100, [30]),
            (200, [40]),
        ],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=70,
            side=Side.BID,
        )
    )

    # Both price levels are swept.
    assert book.get_l3_ask_data() == []

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.ASK, 100, 30),
        (2, Side.BID, 100, 30),
        (1, Side.ASK, 200, 40),
        (2, Side.BID, 200, 40),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_market_order_ask_1():
    """Test sell order that partially consumes one order"""
    book, agent, limit_orders = setup_book_with_orders(
        bids=[(100, [30])],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=10,
            side=Side.ASK,
        )
    )

    # 10 of the resting 30 are consumed at price 100.
    assert book.get_l3_bid_data() == [(100, [20])]

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.BID, 100, 10),
        (2, Side.ASK, 100, 10),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_market_order_ask_2():
    """Test sell order that fully consumes one order"""
    book, agent, limit_orders = setup_book_with_orders(
        bids=[(100, [30])],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=30,
            side=Side.ASK,
        )
    )

    # The single resting bid is fully consumed.
    assert book.get_l3_bid_data() == []

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.BID, 100, 30),
        (2, Side.ASK, 100, 30),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_market_order_ask_3():
    """Test sell order that consumes multiple orders"""
    book, agent, limit_orders = setup_book_with_orders(
        bids=[(100, [30, 40])],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=70,
            side=Side.ASK,
        )
    )

    # Both resting bids at 100 are fully consumed.
    assert book.get_l3_bid_data() == []

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.BID, 100, 30),
        (2, Side.ASK, 100, 30),
        (1, Side.BID, 100, 40),
        (2, Side.ASK, 100, 40),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_market_order_ask_4():
    """Test sell order that consumes multiple orders at different prices"""
    book, agent, limit_orders = setup_book_with_orders(
        bids=[
            (200, [40]),
            (100, [30]),
        ],
    )

    book.handle_market_order(
        MarketOrder(
            agent_id=2,
            time_placed=TIME,
            symbol=SYMBOL,
            quantity=70,
            side=Side.ASK,
        )
    )

    # Both price levels are swept, best bid (200) first.
    assert book.get_l3_bid_data() == []

    # Expected execution messages as (recipient, side, fill_price, quantity).
    expected = [
        (1, Side.BID, 200, 40),
        (2, Side.ASK, 200, 40),
        (1, Side.BID, 100, 30),
        (2, Side.ASK, 100, 30),
    ]
    assert len(agent.messages) == len(expected)
    for (recipient, side, price, qty), (dest, message) in zip(expected, agent.messages):
        assert dest == recipient
        assert message.order.agent_id == recipient
        assert message.order.side == side
        assert message.order.fill_price == price
        assert message.order.quantity == qty
def test_handle_bad_limit_orders():
    """Malformed market orders must emit a UserWarning and be rejected."""
    book, _, _ = setup_book_with_orders()

    bad_orders = [
        # Symbol does not match book
        MarketOrder(agent_id=1, time_placed=TIME, symbol="BAD",
                    quantity=70, side=Side.ASK),
        # Order quantity not integer
        MarketOrder(agent_id=1, time_placed=TIME, symbol=SYMBOL,
                    quantity=1.5, side=Side.BID),
        # Order quantity is negative
        MarketOrder(agent_id=1, time_placed=TIME, symbol=SYMBOL,
                    quantity=-10, side=Side.BID),
    ]

    for order in bad_orders:
        with pytest.warns(UserWarning):
            book.handle_market_order(order)
| 30.336986
| 75
| 0.633975
| 1,584
| 11,073
| 4.294823
| 0.044192
| 0.244598
| 0.335146
| 0.117595
| 0.973394
| 0.962958
| 0.956784
| 0.944877
| 0.937528
| 0.930472
| 0
| 0.06144
| 0.222433
| 11,073
| 364
| 76
| 30.42033
| 0.728688
| 0.045336
| 0
| 0.828671
| 0
| 0
| 0.000285
| 0
| 0
| 0
| 0
| 0
| 0.475524
| 1
| 0.031469
| false
| 0
| 0.01049
| 0
| 0.041958
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
7f97574b3927ab8b9546a0cf571374c145fc8ab0
| 241
|
py
|
Python
|
package_bundler/settings.py
|
STPackageBundler/package-bundler
|
6c2a97f7b1db2dc5d6afff72557c09927095d851
|
[
"MIT"
] | 7
|
2015-01-24T05:22:31.000Z
|
2018-07-12T07:30:46.000Z
|
package_bundler/settings.py
|
STPackageBundler/package-bundler
|
6c2a97f7b1db2dc5d6afff72557c09927095d851
|
[
"MIT"
] | null | null | null |
package_bundler/settings.py
|
STPackageBundler/package-bundler
|
6c2a97f7b1db2dc5d6afff72557c09927095d851
|
[
"MIT"
] | null | null | null |
import sublime
def pb_settings_filename():
    """Return the name of Package Bundler's own settings file."""
    name = 'Package Bundler.sublime-settings'
    return name
def st_settings_filename():
    """Return the Sublime Text global settings filename for this ST build.

    Builds >= 2174 renamed 'Global.sublime-settings' to
    'Preferences.sublime-settings'.
    """
    modern = int(sublime.version()) >= 2174
    return 'Preferences.sublime-settings' if modern else 'Global.sublime-settings'
| 24.1
| 45
| 0.73029
| 28
| 241
| 6.142857
| 0.571429
| 0.261628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.161826
| 241
| 9
| 46
| 26.777778
| 0.831683
| 0
| 0
| 0
| 0
| 0
| 0.344398
| 0.311203
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.142857
| 0.142857
| 0.857143
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
f6827bc91fbd4f2dc40417b8dbf29d2dde3b0610
| 4,297
|
py
|
Python
|
Project/weighted_and_unweighted_multinomial_nb_5x5.py
|
TOBEKNOWNABBAS/AI106394
|
51aa967ab63f9cc7fc64f7b9017d23f70bd5cfe7
|
[
"MIT"
] | null | null | null |
Project/weighted_and_unweighted_multinomial_nb_5x5.py
|
TOBEKNOWNABBAS/AI106394
|
51aa967ab63f9cc7fc64f7b9017d23f70bd5cfe7
|
[
"MIT"
] | null | null | null |
Project/weighted_and_unweighted_multinomial_nb_5x5.py
|
TOBEKNOWNABBAS/AI106394
|
51aa967ab63f9cc7fc64f7b9017d23f70bd5cfe7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Weighted and Unweighted_Multinomial_NB_5x5.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ZStEhvYIHrFvyUNmdqi1Ls6OAqywNQyb
"""
import numpy as np
import sklearn as sk
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, accuracy_score
import math
#function to perform convolution
def convolve2D(image, filter):
    """Valid-mode 2D windowed average of `image` with `filter`.

    Each output pixel is sum(window * filter) // filter.size, i.e. the
    integer mean of the filter-weighted window.

    Parameters
    ----------
    image : 2D numpy array of shape (H, W)
    filter : 2D numpy array of shape (fX, fY); fX/fY assumed odd

    Returns
    -------
    2D numpy array of shape (H - fX + 1, W - fY + 1)
    """
    fX, fY = filter.shape  # Get filter dimensions
    # Derive the output size from the actual image instead of the previous
    # hard-coded n = 28, so any image size works; (f//2)*2 == f-1 for odd f.
    nX = image.shape[0] - (fX // 2) * 2
    nY = image.shape[1] - (fY // 2) * 2
    newImage = np.zeros((nX, nY))  # empty reduced-size output image
    # Normalizer was hard-coded to 25, which is only correct for 5x5 filters;
    # filter.size gives the same value for 5x5 and generalizes to any size.
    norm = filter.size
    for i in range(0, nX):
        for j in range(0, nY):
            newImage[i][j] = np.sum(image[i:i + fX, j:j + fY] * filter) // norm
    return newImage
#Read Data from CSV
train = pd.read_csv("train.csv")
X = train.drop('label', axis=1)
Y = train['label']

# 5x5 unweighted (box) convolution kernel.
kernel = np.array([[1, 1, 1, 1, 1],
                   [1, 1, 1, 1, 1],
                   [1, 1, 1, 1, 1],
                   [1, 1, 1, 1, 1],
                   [1, 1, 1, 1, 1]])

#convert from dataframe to numpy array
X = X.to_numpy()
print(X.shape)

ss = 500  #subset size for dry runs change to 42000 to run on whole data

# Convolve every image; collect rows in a list and stack once at the end.
# (The previous np.append inside the loop re-allocated the whole array on
# every iteration, making the loop O(n^2) in total copies.)
rows = []
for img in X[0:ss, :]:
    img2D = np.reshape(img, (28, 28))           # flat row -> 28x28 image
    nImg = convolve2D(img2D, kernel)            # 24x24 reduced image
    rows.append(np.reshape(nImg, (-1, 576)))    # back to one flat row
sX = np.vstack(rows) if rows else np.empty((0, 576), int)

Y = Y.to_numpy()
sY = Y[0:ss]
print(sY.shape)
print(sX.shape)

# train and test model
sXTrain, sXTest, yTrain, yTest = train_test_split(sX, sY, test_size=0.2, random_state=0)
print(sXTest.shape, ", ", yTest.shape)
print(sXTrain.shape, ", ", yTrain.shape)
clf = MultinomialNB()
clf.fit(sXTrain, yTrain)
print(clf.class_count_)
print(clf.score(sXTest, yTest))
import numpy as np
import sklearn as sk
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, accuracy_score
import math
#function to perform convolution
def convolve2D(image, filter):
    """Valid-mode 2D windowed average of `image` with `filter`.

    Each output pixel is sum(window * filter) // filter.size, i.e. the
    integer mean of the filter-weighted window.

    Parameters
    ----------
    image : 2D numpy array of shape (H, W)
    filter : 2D numpy array of shape (fX, fY); fX/fY assumed odd

    Returns
    -------
    2D numpy array of shape (H - fX + 1, W - fY + 1)
    """
    fX, fY = filter.shape  # Get filter dimensions
    # Derive the output size from the actual image instead of the previous
    # hard-coded n = 28, so any image size works; (f//2)*2 == f-1 for odd f.
    nX = image.shape[0] - (fX // 2) * 2
    nY = image.shape[1] - (fY // 2) * 2
    newImage = np.zeros((nX, nY))  # empty reduced-size output image
    # Normalizer was hard-coded to 25, which is only correct for 5x5 filters;
    # filter.size gives the same value for 5x5 and generalizes to any size.
    norm = filter.size
    for i in range(0, nX):
        for j in range(0, nY):
            newImage[i][j] = np.sum(image[i:i + fX, j:j + fY] * filter) // norm
    return newImage
#Read Data from CSV
train = pd.read_csv("train.csv")
X = train.drop('label', axis=1)
Y = train['label']

# 5x5 weighted kernel: emphasizes the window center over the edges.
kernel = np.array([[1, 1, 1, 1, 1],
                   [1, 2, 2, 2, 1],
                   [1, 2, 3, 2, 1],
                   [1, 2, 2, 2, 1],
                   [1, 1, 1, 1, 1]])

#convert from dataframe to numpy array
X = X.to_numpy()
print(X.shape)

ss = 500  #subset size for dry runs change to 42000 to run on whole data

# Convolve every image; collect rows in a list and stack once at the end.
# (The previous np.append inside the loop re-allocated the whole array on
# every iteration, making the loop O(n^2) in total copies.)
rows = []
for img in X[0:ss, :]:
    img2D = np.reshape(img, (28, 28))           # flat row -> 28x28 image
    nImg = convolve2D(img2D, kernel)            # 24x24 reduced image
    rows.append(np.reshape(nImg, (-1, 576)))    # back to one flat row
sX = np.vstack(rows) if rows else np.empty((0, 576), int)

Y = Y.to_numpy()
sY = Y[0:ss]
print(sY.shape)
print(sX.shape)

# train and test model
sXTrain, sXTest, yTrain, yTest = train_test_split(sX, sY, test_size=0.2, random_state=0)
print(sXTest.shape, ", ", yTest.shape)
print(sXTrain.shape, ", ", yTrain.shape)
clf = MultinomialNB()
clf.fit(sXTrain, yTrain)
print(clf.class_count_)
print(clf.score(sXTest, yTest))
| 25.730539
| 85
| 0.713288
| 714
| 4,297
| 4.243697
| 0.207283
| 0.023762
| 0.030693
| 0.036964
| 0.940924
| 0.940924
| 0.940924
| 0.937624
| 0.937624
| 0.937624
| 0
| 0.041368
| 0.156155
| 4,297
| 167
| 86
| 25.730539
| 0.794264
| 0.278799
| 0
| 0.990196
| 1
| 0
| 0.015057
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.254902
| 0
| 0.294118
| 0.137255
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f6c3db64ab84927e9bbcf3fbbf0efdb903993781
| 1,885
|
py
|
Python
|
test/ResultsAndPrizes/5x36(old)/test_5x36_winning_numbers_for_several_draws.py
|
FearFactor1/SPA
|
a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7
|
[
"Apache-2.0"
] | 1
|
2019-12-05T06:50:54.000Z
|
2019-12-05T06:50:54.000Z
|
test/ResultsAndPrizes/5x36(old)/test_5x36_winning_numbers_for_several_draws.py
|
FearFactor1/SPA
|
a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7
|
[
"Apache-2.0"
] | null | null | null |
test/ResultsAndPrizes/5x36(old)/test_5x36_winning_numbers_for_several_draws.py
|
FearFactor1/SPA
|
a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7
|
[
"Apache-2.0"
] | null | null | null |
# 5 out of 36 (old game) + winning numbers for several draws
def test_5x36_winning_numbers_for_several_draws(app):
    """Check the 'winning numbers for several draws' report for 5x36 (old)."""
    page = app.ResultAndPrizes
    page.open_page_results_and_prizes()
    page.click_the_winning_numbers_for_several_draws()
    page.click_ok_for_several_draws_modal_window()
    page.button_get_report_winners()
    page.parser_report_text_winners()

    # Header, then (draw title, draw timestamp, drawn numbers) per draw.
    expected_fragments = [
        "ВЫИГРЫШНЫЕ НОМЕРА",
        "ЛОТО 5/36 (Старая) - Тираж 10573 :",
        "07/09/2017, 19:00:00 ЛОК",
        "24 04 18 23 05",
        "ЛОТО 5/36 (Старая) - Тираж 10572 :",
        "07/09/2017, 18:16:00 ЛОК",
        "04 02 20 13 11",
        "ЛОТО 5/36 (Старая) - Тираж 10571 :",
        "07/09/2017, 18:01:00 ЛОК",
        "23 35 20 03 05",
        "ЛОТО 5/36 (Старая) - Тираж 10570 :",
        "07/09/2017, 17:46:00 ЛОК",
        "14 16 03 10 13",
        "ЛОТО 5/36 (Старая) - Тираж 10569 :",
        "07/09/2017, 17:31:00 ЛОК",
        "19 18 01 07 33",
    ]
    # One parser call per fragment, mirroring the original assert-per-line flow.
    for fragment in expected_fragments:
        assert fragment in page.parser_report_text_winners()

    page.comeback_main_page()
| 69.814815
| 99
| 0.784085
| 277
| 1,885
| 5.054152
| 0.252708
| 0.282857
| 0.291429
| 0.364286
| 0.754286
| 0.754286
| 0.708571
| 0.644286
| 0.644286
| 0.644286
| 0
| 0.100302
| 0.122016
| 1,885
| 27
| 100
| 69.814815
| 0.745619
| 0.027586
| 0
| 0
| 0
| 0
| 0.205786
| 0
| 0
| 0
| 0
| 0
| 0.695652
| 1
| 0.043478
| false
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f6f96b01215aa92cb4acb6be7b5b0f960563bf8b
| 41,262
|
py
|
Python
|
tests/test_nodes_stats_parser.py
|
Showmax/prometheus-es-exporter
|
6284b5c948222e177c19d1283948a064f9ba57bf
|
[
"MIT"
] | null | null | null |
tests/test_nodes_stats_parser.py
|
Showmax/prometheus-es-exporter
|
6284b5c948222e177c19d1283948a064f9ba57bf
|
[
"MIT"
] | null | null | null |
tests/test_nodes_stats_parser.py
|
Showmax/prometheus-es-exporter
|
6284b5c948222e177c19d1283948a064f9ba57bf
|
[
"MIT"
] | 1
|
2018-10-23T11:51:15.000Z
|
2018-10-23T11:51:15.000Z
|
import unittest
from prometheus_es_exporter.nodes_stats_parser import parse_response
from tests.utils import convert_result
# Sample responses generated by querying the endpoint on a Elasticsearch
# server populated with the following data (http command = Httpie utility):
# > http -v POST localhost:9200/foo/bar/1 val:=1 group1=a group2=a
# > http -v POST localhost:9200/foo/bar/2 val:=2 group1=a group2=b
# > http -v POST localhost:9200/foo/bar/3 val:=3 group1=b group2=b
# Some details are instance specific, so mileage may vary!
class Test(unittest.TestCase):
    """Tests for ``nodes_stats_parser.parse_response``.

    Feeds a captured ``/_nodes/stats`` payload through the parser and
    checks the flattened metric-name -> value mapping it produces.
    """

    # The expected dict is very large; show full diffs on failure.
    maxDiff = None

    def test_endpoint(self):
        """parse_response flattens a node-stats payload into labelled metrics."""
        # Endpoint: /_nodes/stats?pretty
        response = {
            '_nodes': {
                'total': 1,
                'successful': 1,
                'failed': 0
            },
            'cluster_name': 'elasticsearch',
            'nodes': {
                'bRcKq5zUTAuwNf4qvnXzIQ': {
                    'timestamp': 1484861642281,
                    'name': 'bRcKq5z',
                    'transport_address': '127.0.0.1:9300',
                    'host': '127.0.0.1',
                    'ip': '127.0.0.1:9300',
                    'roles': [
                        'master',
                        'data',
                        'ingest'
                    ],
                    'indices': {
                        'docs': {
                            'count': 3,
                            'deleted': 0
                        },
                        'store': {
                            'size_in_bytes': 12972,
                            'throttle_time_in_millis': 0
                        },
                        'indexing': {
                            'index_total': 3,
                            'index_time_in_millis': 95,
                            'index_current': 0,
                            'index_failed': 0,
                            'delete_total': 0,
                            'delete_time_in_millis': 0,
                            'delete_current': 0,
                            'noop_update_total': 0,
                            'is_throttled': False,
                            'throttle_time_in_millis': 0
                        },
                        'get': {
                            'total': 0,
                            'time_in_millis': 0,
                            'exists_total': 0,
                            'exists_time_in_millis': 0,
                            'missing_total': 0,
                            'missing_time_in_millis': 0,
                            'current': 0
                        },
                        'search': {
                            'open_contexts': 0,
                            'query_total': 0,
                            'query_time_in_millis': 0,
                            'query_current': 0,
                            'fetch_total': 0,
                            'fetch_time_in_millis': 0,
                            'fetch_current': 0,
                            'scroll_total': 0,
                            'scroll_time_in_millis': 0,
                            'scroll_current': 0,
                            'suggest_total': 0,
                            'suggest_time_in_millis': 0,
                            'suggest_current': 0
                        },
                        'merges': {
                            'current': 0,
                            'current_docs': 0,
                            'current_size_in_bytes': 0,
                            'total': 0,
                            'total_time_in_millis': 0,
                            'total_docs': 0,
                            'total_size_in_bytes': 0,
                            'total_stopped_time_in_millis': 0,
                            'total_throttled_time_in_millis': 0,
                            'total_auto_throttle_in_bytes': 104857600
                        },
                        'refresh': {
                            'total': 6,
                            'total_time_in_millis': 304
                        },
                        'flush': {
                            'total': 3,
                            'total_time_in_millis': 72
                        },
                        'warmer': {
                            'current': 0,
                            'total': 14,
                            'total_time_in_millis': 19
                        },
                        'query_cache': {
                            'memory_size_in_bytes': 0,
                            'total_count': 0,
                            'hit_count': 0,
                            'miss_count': 0,
                            'cache_size': 0,
                            'cache_count': 0,
                            'evictions': 0
                        },
                        'fielddata': {
                            'memory_size_in_bytes': 0,
                            'evictions': 0
                        },
                        'completion': {
                            'size_in_bytes': 0
                        },
                        'segments': {
                            'count': 3,
                            'memory_in_bytes': 7908,
                            'terms_memory_in_bytes': 5976,
                            'stored_fields_memory_in_bytes': 936,
                            'term_vectors_memory_in_bytes': 0,
                            'norms_memory_in_bytes': 576,
                            'points_memory_in_bytes': 144,
                            'doc_values_memory_in_bytes': 276,
                            'index_writer_memory_in_bytes': 0,
                            'version_map_memory_in_bytes': 0,
                            'fixed_bit_set_memory_in_bytes': 0,
                            'max_unsafe_auto_id_timestamp': -1,
                            'file_sizes': {}
                        },
                        'translog': {
                            'operations': 0,
                            'size_in_bytes': 215
                        },
                        'request_cache': {
                            'memory_size_in_bytes': 0,
                            'evictions': 0,
                            'hit_count': 0,
                            'miss_count': 0
                        },
                        'recovery': {
                            'current_as_source': 0,
                            'current_as_target': 0,
                            'throttle_time_in_millis': 0
                        }
                    },
                    'os': {
                        'timestamp': 1484861642359,
                        'cpu': {
                            'percent': 53,
                            'load_average': {
                                '1m': 2.53,
                                '5m': 2.3,
                                '15m': 2.23
                            }
                        },
                        'mem': {
                            'total_in_bytes': 16703762432,
                            'free_in_bytes': 164323328,
                            'used_in_bytes': 16539439104,
                            'free_percent': 1,
                            'used_percent': 99
                        },
                        'swap': {
                            'total_in_bytes': 17054035968,
                            'free_in_bytes': 12281872384,
                            'used_in_bytes': 4772163584
                        }
                    },
                    'process': {
                        'timestamp': 1484861642360,
                        'open_file_descriptors': 180,
                        'max_file_descriptors': 1048576,
                        'cpu': {
                            'percent': 0,
                            'total_in_millis': 28270
                        },
                        'mem': {
                            'total_virtual_in_bytes': 5947977728
                        }
                    },
                    'jvm': {
                        'timestamp': 1484861642361,
                        'uptime_in_millis': 614767,
                        'mem': {
                            'heap_used_in_bytes': 233688144,
                            'heap_used_percent': 11,
                            'heap_committed_in_bytes': 2112618496,
                            'heap_max_in_bytes': 2112618496,
                            'non_heap_used_in_bytes': 67167936,
                            'non_heap_committed_in_bytes': 71741440,
                            'pools': {
                                'young': {
                                    'used_in_bytes': 189809608,
                                    'max_in_bytes': 279183360,
                                    'peak_used_in_bytes': 279183360,
                                    'peak_max_in_bytes': 279183360
                                },
                                'survivor': {
                                    'used_in_bytes': 34865136,
                                    'max_in_bytes': 34865152,
                                    'peak_used_in_bytes': 34865136,
                                    'peak_max_in_bytes': 34865152
                                },
                                'old': {
                                    'used_in_bytes': 9013400,
                                    'max_in_bytes': 1798569984,
                                    'peak_used_in_bytes': 9013400,
                                    'peak_max_in_bytes': 1798569984
                                }
                            }
                        },
                        'threads': {
                            'count': 40,
                            'peak_count': 46
                        },
                        'gc': {
                            'collectors': {
                                'young': {
                                    'collection_count': 2,
                                    'collection_time_in_millis': 189
                                },
                                'old': {
                                    'collection_count': 1,
                                    'collection_time_in_millis': 143
                                }
                            }
                        },
                        'buffer_pools': {
                            'direct': {
                                'count': 29,
                                'used_in_bytes': 87069546,
                                'total_capacity_in_bytes': 87069545
                            },
                            'mapped': {
                                'count': 3,
                                'used_in_bytes': 9658,
                                'total_capacity_in_bytes': 9658
                            }
                        },
                        'classes': {
                            'current_loaded_count': 10236,
                            'total_loaded_count': 10236,
                            'total_unloaded_count': 0
                        }
                    },
                    'thread_pool': {
                        'bulk': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'fetch_shard_started': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'fetch_shard_store': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'flush': {
                            'threads': 2,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 2,
                            'completed': 6
                        },
                        'force_merge': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'generic': {
                            'threads': 4,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 4,
                            'completed': 73
                        },
                        'get': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'index': {
                            'threads': 3,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 3,
                            'completed': 3
                        },
                        'listener': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'management': {
                            'threads': 3,
                            'queue': 0,
                            'active': 1,
                            'rejected': 0,
                            'largest': 3,
                            'completed': 77
                        },
                        'refresh': {
                            'threads': 1,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 1,
                            'completed': 588
                        },
                        'search': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'snapshot': {
                            'threads': 0,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 0,
                            'completed': 0
                        },
                        'warmer': {
                            'threads': 1,
                            'queue': 0,
                            'active': 0,
                            'rejected': 0,
                            'largest': 1,
                            'completed': 9
                        }
                    },
                    'fs': {
                        'timestamp': 1484861642369,
                        'total': {
                            'total_in_bytes': 233134567424,
                            'free_in_bytes': 92206276608,
                            'available_in_bytes': 80292356096,
                            'spins': 'true'
                        },
                        'data': [
                            {
                                'path': '/usr/share/elasticsearch/data/nodes/0',
                                'mount': '/usr/share/elasticsearch/data (/dev/mapper/ubuntu--vg-root)',
                                'type': 'ext4',
                                'total_in_bytes': 233134567424,
                                'free_in_bytes': 92206276608,
                                'available_in_bytes': 80292356096,
                                'spins': 'true'
                            }
                        ],
                        'io_stats': {
                            'devices': [
                                {
                                    'device_name': 'dm-0',
                                    'operations': 22045,
                                    'read_operations': 14349,
                                    'write_operations': 7696,
                                    'read_kilobytes': 294732,
                                    'write_kilobytes': 113424
                                }
                            ],
                            'total': {
                                'operations': 22045,
                                'read_operations': 14349,
                                'write_operations': 7696,
                                'read_kilobytes': 294732,
                                'write_kilobytes': 113424
                            }
                        }
                    },
                    'transport': {
                        'server_open': 0,
                        'rx_count': 8,
                        'rx_size_in_bytes': 3607,
                        'tx_count': 8,
                        'tx_size_in_bytes': 3607
                    },
                    'http': {
                        'current_open': 1,
                        'total_opened': 4
                    },
                    'breakers': {
                        'request': {
                            'limit_size_in_bytes': 1267571097,
                            'limit_size': '1.1gb',
                            'estimated_size_in_bytes': 0,
                            'estimated_size': '0b',
                            'overhead': 1.0,
                            'tripped': 0
                        },
                        'fielddata': {
                            'limit_size_in_bytes': 1267571097,
                            'limit_size': '1.1gb',
                            'estimated_size_in_bytes': 0,
                            'estimated_size': '0b',
                            'overhead': 1.03,
                            'tripped': 0
                        },
                        'in_flight_requests': {
                            'limit_size_in_bytes': 2112618496,
                            'limit_size': '1.9gb',
                            'estimated_size_in_bytes': 0,
                            'estimated_size': '0b',
                            'overhead': 1.0,
                            'tripped': 0
                        },
                        'parent': {
                            'limit_size_in_bytes': 1478832947,
                            'limit_size': '1.3gb',
                            'estimated_size_in_bytes': 0,
                            'estimated_size': '0b',
                            'overhead': 1.0,
                            'tripped': 0
                        }
                    },
                    'script': {
                        'compilations': 0,
                        'cache_evictions': 0
                    },
                    'discovery': {
                        'cluster_state_queue': {
                            'total': 0,
                            'pending': 0,
                            'committed': 0
                        }
                    },
                    'ingest': {
                        'total': {
                            'count': 0,
                            'time_in_millis': 0,
                            'current': 0,
                            'failed': 0
                        },
                        'pipelines': {}
                    }
                }
            }
        }
        # Keys are flattened metric names with node_id/node_name labels
        # (plus pool/collector/thread_pool/device/path labels where the
        # source data is keyed by those); values are the raw numbers.
        expected = {
            'os_cpu_percent{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 53,
            'os_cpu_load_average_1m{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 2.53,
            'os_cpu_load_average_5m{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 2.3,
            'os_cpu_load_average_15m{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 2.23,
            'os_mem_total_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 16703762432,
            'os_mem_free_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 164323328,
            'os_mem_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 16539439104,
            'os_mem_free_percent{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1,
            'os_mem_used_percent{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 99,
            'os_swap_free_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 12281872384,
            'os_swap_total_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 17054035968,
            'os_swap_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 4772163584,
            'process_open_file_descriptors{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 180,
            'process_max_file_descriptors{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1048576,
            'process_cpu_percent{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'process_cpu_total_in_millis{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 28270,
            'process_mem_total_virtual_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 5947977728,
            'jvm_uptime_in_millis{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 614767,
            'jvm_mem_heap_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 233688144,
            'jvm_mem_heap_used_percent{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 11,
            'jvm_mem_heap_committed_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 2112618496,
            'jvm_mem_heap_max_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 2112618496,
            'jvm_mem_non_heap_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 67167936,
            'jvm_mem_non_heap_committed_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 71741440,
            'jvm_mem_pools_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="young"}': 189809608,
            'jvm_mem_pools_max_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="young"}': 279183360,
            'jvm_mem_pools_peak_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="young"}': 279183360,
            'jvm_mem_pools_peak_max_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="young"}': 279183360,
            'jvm_mem_pools_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="survivor"}': 34865136,
            'jvm_mem_pools_max_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="survivor"}': 34865152,
            'jvm_mem_pools_peak_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="survivor"}': 34865136,
            'jvm_mem_pools_peak_max_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="survivor"}': 34865152,
            'jvm_mem_pools_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="old"}': 9013400,
            'jvm_mem_pools_max_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="old"}': 1798569984,
            'jvm_mem_pools_peak_used_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="old"}': 9013400,
            'jvm_mem_pools_peak_max_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",pool="old"}': 1798569984,
            'jvm_threads_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 40,
            'jvm_threads_peak_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 46,
            'jvm_gc_collectors_collection_count{collector="young",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 2,
            'jvm_gc_collectors_collection_time_in_millis{collector="young",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 189,
            'jvm_gc_collectors_collection_count{collector="old",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1,
            'jvm_gc_collectors_collection_time_in_millis{collector="old",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 143,
            'jvm_buffer_pools_count{buffer_pool="direct",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 29,
            'jvm_buffer_pools_used_in_bytes{buffer_pool="direct",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 87069546,
            'jvm_buffer_pools_total_capacity_in_bytes{buffer_pool="direct",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 87069545,
            'jvm_buffer_pools_count{buffer_pool="mapped",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 3,
            'jvm_buffer_pools_used_in_bytes{buffer_pool="mapped",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 9658,
            'jvm_buffer_pools_total_capacity_in_bytes{buffer_pool="mapped",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 9658,
            'jvm_classes_current_loaded_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 10236,
            'jvm_classes_total_loaded_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 10236,
            'jvm_classes_total_unloaded_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="bulk"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="bulk"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="bulk"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="bulk"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="bulk"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="bulk"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_started"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_started"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_started"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_started"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_started"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_started"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_store"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_store"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_store"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_store"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_store"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="fetch_shard_store"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="flush"}': 2,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="flush"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="flush"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="flush"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="flush"}': 2,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="flush"}': 6,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="force_merge"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="force_merge"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="force_merge"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="force_merge"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="force_merge"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="force_merge"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="generic"}': 4,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="generic"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="generic"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="generic"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="generic"}': 4,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="generic"}': 73,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="get"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="get"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="get"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="get"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="get"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="get"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="index"}': 3,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="index"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="index"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="index"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="index"}': 3,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="index"}': 3,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="listener"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="listener"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="listener"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="listener"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="listener"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="listener"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="management"}': 3,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="management"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="management"}': 1,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="management"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="management"}': 3,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="management"}': 77,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="refresh"}': 1,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="refresh"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="refresh"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="refresh"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="refresh"}': 1,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="refresh"}': 588,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="search"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="search"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="search"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="search"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="search"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="search"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="snapshot"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="snapshot"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="snapshot"}': 0,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="snapshot"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="snapshot"}': 0,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="snapshot"}': 0,
            'thread_pool_threads{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="warmer"}': 1,
            'thread_pool_rejected{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="warmer"}': 0,
            'thread_pool_active{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="warmer"}': 0,
            'thread_pool_queue{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="warmer"}': 0,
            'thread_pool_largest{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="warmer"}': 1,
            'thread_pool_completed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",thread_pool="warmer"}': 9,
            'fs_total_total_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 233134567424,
            'fs_total_free_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 92206276608,
            'fs_total_available_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 80292356096,
            'fs_data_total_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",path="/usr/share/elasticsearch/data/nodes/0"}': 233134567424,
            'fs_data_free_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",path="/usr/share/elasticsearch/data/nodes/0"}': 92206276608,
            'fs_data_available_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z",path="/usr/share/elasticsearch/data/nodes/0"}': 80292356096,
            'fs_io_stats_devices_operations{device_name="dm-0",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 22045,
            'fs_io_stats_devices_read_operations{device_name="dm-0",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 14349,
            'fs_io_stats_devices_write_operations{device_name="dm-0",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 7696,
            'fs_io_stats_devices_read_kilobytes{device_name="dm-0",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 294732,
            'fs_io_stats_devices_write_kilobytes{device_name="dm-0",node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 113424,
            'fs_io_stats_total_operations{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 22045,
            'fs_io_stats_total_read_operations{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 14349,
            'fs_io_stats_total_write_operations{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 7696,
            'fs_io_stats_total_read_kilobytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 294732,
            'fs_io_stats_total_write_kilobytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 113424,
            'transport_server_open{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'transport_rx_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 8,
            'transport_rx_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 3607,
            'transport_tx_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 8,
            'transport_tx_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 3607,
            'http_current_open{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1,
            'http_total_opened{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 4,
            'breakers_request_limit_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1267571097,
            'breakers_request_estimated_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'breakers_request_overhead{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1.0,
            'breakers_request_tripped{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'breakers_fielddata_limit_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1267571097,
            'breakers_fielddata_estimated_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'breakers_fielddata_overhead{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1.03,
            'breakers_fielddata_tripped{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'breakers_in_flight_requests_limit_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 2112618496,
            'breakers_in_flight_requests_estimated_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'breakers_in_flight_requests_overhead{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1.0,
            'breakers_in_flight_requests_tripped{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'breakers_parent_limit_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1478832947,
            'breakers_parent_estimated_size_in_bytes{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'breakers_parent_overhead{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 1.0,
            'breakers_parent_tripped{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'script_compilations{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'script_cache_evictions{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'discovery_cluster_state_queue_total{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'discovery_cluster_state_queue_pending{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'discovery_cluster_state_queue_committed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'ingest_total_count{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'ingest_total_time_in_millis{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'ingest_total_current{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
            'ingest_total_failed{node_id="bRcKq5zUTAuwNf4qvnXzIQ",node_name="bRcKq5z"}': 0,
        }
        result = convert_result(parse_response(response))
        self.assertEqual(result, expected)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 61.862069
| 153
| 0.506762
| 3,575
| 41,262
| 5.452028
| 0.091189
| 0.103843
| 0.262891
| 0.300446
| 0.791083
| 0.764045
| 0.751321
| 0.705659
| 0.652506
| 0.5894
| 0
| 0.078551
| 0.381707
| 41,262
| 666
| 154
| 61.954955
| 0.685442
| 0.010349
| 0
| 0.245399
| 1
| 0.004601
| 0.516472
| 0.435251
| 0
| 0
| 0
| 0
| 0.001534
| 1
| 0.001534
| false
| 0
| 0.004601
| 0
| 0.009202
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
12402b3d786d7b2bb5cec294aeb528acfbed2ed7
| 23,880
|
py
|
Python
|
descent/ast.py
|
ethframe/descent
|
6ca34416953a6123daa280c519fca56c70bf5fee
|
[
"MIT"
] | 1
|
2018-07-12T13:34:06.000Z
|
2018-07-12T13:34:06.000Z
|
descent/ast.py
|
ethframe/descent
|
6ca34416953a6123daa280c519fca56c70bf5fee
|
[
"MIT"
] | null | null | null |
descent/ast.py
|
ethframe/descent
|
6ca34416953a6123daa280c519fca56c70bf5fee
|
[
"MIT"
] | null | null | null |
class char:
    """Leaf node that accumulates a run of character text in ``val``."""

    def __init__(self, val=''):
        self.val = val

    def __str__(self):
        return self.val

    def __repr__(self):
        return 'char(%r)' % (self.val,)

    def __hash__(self):
        return hash((self.__class__, self.val))

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.val == other.val

    def unapply1(self):
        # Single-value destructuring: expose the payload directly.
        return self.val

    def unapply(self):
        return (self.val,)

    def copy(self):
        return char(self.val)

    def consume(self, val):
        # Append more text; return self so calls can be chained.
        self.val += val
        return self

    def splice_to(self, other, converters):
        # Feed our text into `other`, through the registered 'char'
        # converter when one is present.
        fn = converters.get('char')
        payload = fn(self.val) if fn else self.val
        return other.consume(payload)

    def to_dict(self):
        return {'__type__': 'char', 'value': self.val}
class octal:
    """Leaf node that accumulates octal-escape text in ``val``."""

    def __init__(self, val=''):
        self.val = val

    def __str__(self):
        return self.val

    def __repr__(self):
        return 'octal(%r)' % (self.val,)

    def __hash__(self):
        return hash((self.__class__, self.val))

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.val == other.val

    def unapply1(self):
        return self.val

    def unapply(self):
        return (self.val,)

    def copy(self):
        return octal(self.val)

    def consume(self, val):
        # Append more text; return self so calls can be chained.
        self.val += val
        return self

    def splice_to(self, other, converters):
        # Hand our payload to `other`, optionally via the 'octal' converter.
        convert = converters.get('octal')
        if not convert:
            return other.consume(self.val)
        return other.consume(convert(self.val))

    def to_dict(self):
        return {'__type__': 'octal', 'value': self.val}
class string:
    """Leaf node that accumulates string-literal text in ``val``."""

    def __init__(self, val=''):
        self.val = val

    def __str__(self):
        return self.val

    def __repr__(self):
        return 'string(%r)' % (self.val,)

    def __hash__(self):
        return hash((self.__class__, self.val))

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.val == other.val

    def unapply1(self):
        return self.val

    def unapply(self):
        return (self.val,)

    def copy(self):
        return string(self.val)

    def consume(self, val):
        # Append more text; return self so calls can be chained.
        self.val += val
        return self

    def splice_to(self, other, converters):
        # Transfer our payload into `other`, passing it through the
        # 'string' converter if one is registered.
        out = self.val
        transform = converters.get('string')
        if transform:
            out = transform(out)
        return other.consume(out)

    def to_dict(self):
        return {'__type__': 'string', 'value': self.val}
class reference:
    """Leaf node that accumulates a referenced rule name in ``val``."""

    def __init__(self, val=''):
        self.val = val

    def __str__(self):
        return self.val

    def __repr__(self):
        return 'reference(%r)' % (self.val,)

    def __hash__(self):
        return hash((self.__class__, self.val))

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.val == other.val

    def unapply1(self):
        return self.val

    def unapply(self):
        return (self.val,)

    def copy(self):
        return reference(self.val)

    def consume(self, val):
        # Append more text; return self so calls can be chained.
        self.val += val
        return self

    def splice_to(self, other, converters):
        # Forward our payload to `other`, via the 'reference' converter
        # when one is registered.
        fn = converters.get('reference')
        payload = fn(self.val) if fn else self.val
        return other.consume(payload)

    def to_dict(self):
        return {'__type__': 'reference', 'value': self.val}
class rule:
    """Grammar-rule node: a name together with its expression tree."""

    __slots__ = ('name', 'expr')

    def __init__(self, name=None, expr=None):
        self.name = name
        self.expr = expr

    def __repr__(self):
        return 'rule(%r, %r)' % (self.name, self.expr)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.name == other.name and self.expr == other.expr

    def unapply1(self):
        return self

    def unapply(self):
        return (self.name, self.expr)

    def copy(self):
        return rule(self.name, self.expr)

    def append_name(self, val):
        self.name = val
        return self

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # The name is always copied; the expression only when present.
        other.append_name(self.name)
        if self.expr is not None:
            other.append_expr(self.expr)
        return other

    def to_dict(self):
        expr = self.expr
        return {
            '__type__': 'rule',
            'name': self.name.to_dict(),
            'expr': expr.to_dict() if expr is not None else None,
        }
class fail:
    """Stateless marker node representing a match failure."""

    def __repr__(self):
        return 'fail()'

    def __hash__(self):
        # All instances hash alike: there is no per-instance state.
        return hash(self.__class__)

    def __eq__(self, other):
        return self.__class__ is other.__class__

    def unapply1(self):
        return self

    def unapply(self):
        return (self,)

    def copy(self):
        # No state to duplicate, so sharing the instance is safe.
        return self

    def splice_to(self, other):
        # Nothing to transfer.
        return other

    def to_dict(self):
        return {'__type__': 'fail'}
class char_any:
    """Stateless marker node matching any single character."""

    def __repr__(self):
        return 'char_any()'

    def __hash__(self):
        # All instances hash alike: there is no per-instance state.
        return hash(self.__class__)

    def __eq__(self, other):
        return self.__class__ is other.__class__

    def unapply1(self):
        return self

    def unapply(self):
        return (self,)

    def copy(self):
        # No state to duplicate, so sharing the instance is safe.
        return self

    def splice_to(self, other):
        # Nothing to transfer.
        return other

    def to_dict(self):
        return {'__type__': 'char_any'}
class char_range:
    """Grammar node matching a character between ``start`` and ``end``."""
    __slots__ = ('start', 'end')

    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end

    def __repr__(self):
        return 'char_range(%r, %r)' % (self.start, self.end)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.start == other.start and self.end == other.end

    def unapply1(self):
        return self

    def unapply(self):
        return (self.start, self.end)

    def copy(self):
        # Shallow copy: bound objects are shared with the copy.
        return char_range(self.start, self.end)

    def append_start(self, val):
        self.start = val
        return self

    def append_end(self, val):
        self.end = val
        return self

    def splice_to(self, other, converters):
        # Push both bounds into `other`; converters unused for this node.
        other.append_start(self.start)
        other.append_end(self.end)
        return other

    def to_dict(self):
        return {
            '__type__': 'char_range',
            'start': self.start.to_dict(),
            'end': self.end.to_dict(),
        }
class append:
    """Binary grammar node pairing an expression with a target ``name``."""
    __slots__ = ('expr', 'name')

    def __init__(self, expr=None, name=None):
        self.expr = expr
        self.name = name

    def __repr__(self):
        return 'append(%r, %r)' % (self.expr, self.name)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr and self.name == other.name

    def unapply1(self):
        return self

    def unapply(self):
        return (self.expr, self.name)

    def copy(self):
        # Shallow copy: children are shared with the copy.
        return append(self.expr, self.name)

    def append_expr(self, val):
        self.expr = val
        return self

    def append_name(self, val):
        self.name = val
        return self

    def splice_to(self, other, converters):
        # Push both fields into `other`; converters unused for this node.
        other.append_expr(self.expr)
        other.append_name(self.name)
        return other

    def to_dict(self):
        return {
            '__type__': 'append',
            'expr': self.expr.to_dict(),
            'name': self.name.to_dict(),
        }
class top:
    """Binary grammar node pairing an expression with a top-level ``name``."""
    __slots__ = ('expr', 'name')

    def __init__(self, expr=None, name=None):
        self.expr = expr
        self.name = name

    def __repr__(self):
        return 'top(%r, %r)' % (self.expr, self.name)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr and self.name == other.name

    def unapply1(self):
        return self

    def unapply(self):
        return (self.expr, self.name)

    def copy(self):
        # Shallow copy: children are shared with the copy.
        return top(self.expr, self.name)

    def append_expr(self, val):
        self.expr = val
        return self

    def append_name(self, val):
        self.name = val
        return self

    def splice_to(self, other, converters):
        # Push both fields into `other`; converters unused for this node.
        other.append_expr(self.expr)
        other.append_name(self.name)
        return other

    def to_dict(self):
        return {
            '__type__': 'top',
            'expr': self.expr.to_dict(),
            'name': self.name.to_dict(),
        }
class splice:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'splice(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return splice(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'splice', 'expr': self.expr.to_dict()}
class top_splice:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'top_splice(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return top_splice(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'top_splice', 'expr': self.expr.to_dict()}
class ignore:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'ignore(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return ignore(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'ignore', 'expr': self.expr.to_dict()}
class node:
    """Leaf node accumulating matched text in ``val``."""

    def __init__(self, val=''):
        self.val = val

    def __str__(self):
        return self.val

    def __repr__(self):
        return 'node(%r)' % (self.val,)

    def __hash__(self):
        return hash((self.__class__, self.val))

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.val == other.val

    def unapply1(self):
        return self.val

    def unapply(self):
        return (self.val,)

    def copy(self):
        return node(self.val)

    def consume(self, val):
        # Accumulate matched text; returns self for chaining.
        self.val += val
        return self

    def splice_to(self, other, converters):
        # Feed our text into `other`, routed through the registered 'node'
        # converter when one is present (truthiness check preserved).
        converter = converters.get('node')
        if not converter:
            return other.consume(self.val)
        return other.consume(converter(self.val))

    def to_dict(self):
        return {'__type__': 'node', 'value': self.val}
class optional:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'optional(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return optional(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'optional', 'expr': self.expr.to_dict()}
class repeat:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'repeat(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return repeat(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'repeat', 'expr': self.expr.to_dict()}
class repeat1:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'repeat1(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return repeat1(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'repeat1', 'expr': self.expr.to_dict()}
class replace:
    """Binary grammar node replacing ``expr``'s result with ``value``."""
    __slots__ = ('expr', 'value')

    def __init__(self, expr=None, value=None):
        self.expr = expr
        self.value = value

    def __repr__(self):
        return 'replace(%r, %r)' % (self.expr, self.value)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr and self.value == other.value

    def unapply1(self):
        return self

    def unapply(self):
        return (self.expr, self.value)

    def copy(self):
        # Shallow copy: children are shared with the copy.
        return replace(self.expr, self.value)

    def append_expr(self, val):
        self.expr = val
        return self

    def append_value(self, val):
        self.value = val
        return self

    def splice_to(self, other, converters):
        # Push both fields into `other`; converters unused for this node.
        other.append_expr(self.expr)
        other.append_value(self.value)
        return other

    def to_dict(self):
        return {
            '__type__': 'replace',
            'expr': self.expr.to_dict(),
            'value': self.value.to_dict(),
        }
class follow:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'follow(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return follow(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'follow', 'expr': self.expr.to_dict()}
class not_follow:
    """Unary grammar node wrapping one sub-expression in ``expr``."""
    __slots__ = ('expr',)

    def __init__(self, expr=None):
        self.expr = expr

    def __repr__(self):
        return 'not_follow(%r)' % (self.expr,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.expr == other.expr

    def unapply1(self):
        # Unary node: unwrapping yields the single field.
        return self.expr

    def unapply(self):
        return (self.expr,)

    def copy(self):
        # Shallow copy: the wrapped expression object is shared.
        return not_follow(self.expr)

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Move this node's expression into `other`; converters unused here.
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {'__type__': 'not_follow', 'expr': self.expr.to_dict()}
class choice:
    """Ordered-choice node over a list of alternatives in ``items``."""
    __slots__ = ('items',)

    def __init__(self, items=None):
        # A falsy argument (None or []) yields a fresh empty list.
        self.items = items or []

    def __repr__(self):
        return 'choice(%r)' % (self.items,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.items == other.items

    def unapply1(self):
        return self.items

    def unapply(self):
        return (self.items,)

    def copy(self):
        # Copies the list itself; the alternatives inside are shared.
        return choice(list(self.items))

    def append_items(self, val):
        self.items.append(val)
        return self

    def extend_items(self, val):
        self.items.extend(val)
        return self

    def splice_to(self, other, converters):
        # Merge our alternatives into `other`; converters unused here.
        other.extend_items(self.items)
        return other

    def to_dict(self):
        return {
            '__type__': 'choice',
            'items': [i.to_dict() for i in self.items],
        }
class sequence:
    """Sequencing node over an ordered list of sub-expressions ``items``."""
    __slots__ = ('items',)

    def __init__(self, items=None):
        # A falsy argument (None or []) yields a fresh empty list.
        self.items = items or []

    def __repr__(self):
        return 'sequence(%r)' % (self.items,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.items == other.items

    def unapply1(self):
        return self.items

    def unapply(self):
        return (self.items,)

    def copy(self):
        # Copies the list itself; the elements inside are shared.
        return sequence(list(self.items))

    def append_items(self, val):
        self.items.append(val)
        return self

    def extend_items(self, val):
        self.items.extend(val)
        return self

    def splice_to(self, other, converters):
        # Merge our elements into `other`; converters unused here.
        other.extend_items(self.items)
        return other

    def to_dict(self):
        return {
            '__type__': 'sequence',
            'items': [i.to_dict() for i in self.items],
        }
class expand:
    """Macro invocation node: expand macro ``name`` with ``args``."""
    __slots__ = ('name', 'args')

    def __init__(self, name=None, args=None):
        self.name = name
        # A falsy argument (None or []) yields a fresh empty list.
        self.args = args or []

    def __repr__(self):
        return 'expand(%r, %r)' % (self.name, self.args)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.name == other.name and self.args == other.args

    def unapply1(self):
        return self

    def unapply(self):
        return (self.name, self.args)

    def copy(self):
        # Copies the args list; name and the args inside are shared.
        return expand(self.name, list(self.args))

    def append_name(self, val):
        self.name = val
        return self

    def append_args(self, val):
        self.args.append(val)
        return self

    def extend_args(self, val):
        self.args.extend(val)
        return self

    def splice_to(self, other, converters):
        # Transfer name and merge args into `other`; converters unused here.
        other.append_name(self.name)
        other.extend_args(self.args)
        return other

    def to_dict(self):
        return {
            '__type__': 'expand',
            'name': self.name.to_dict(),
            'args': [i.to_dict() for i in self.args],
        }
class macro:
    """Macro definition node: ``name`` with formal ``args`` and body ``expr``."""
    __slots__ = ('name', 'args', 'expr')

    def __init__(self, name=None, args=None, expr=None):
        self.name = name
        # A falsy argument (None or []) yields a fresh empty list.
        self.args = args or []
        self.expr = expr

    def __repr__(self):
        return 'macro(%r, %r, %r)' % (self.name, self.args, self.expr)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return (self.name == other.name
                and self.args == other.args
                and self.expr == other.expr)

    def unapply1(self):
        return self

    def unapply(self):
        return (self.name, self.args, self.expr)

    def copy(self):
        # Copies the args list; name/expr and the args inside are shared.
        return macro(self.name, list(self.args), self.expr)

    def append_name(self, val):
        self.name = val
        return self

    def append_args(self, val):
        self.args.append(val)
        return self

    def extend_args(self, val):
        self.args.extend(val)
        return self

    def append_expr(self, val):
        self.expr = val
        return self

    def splice_to(self, other, converters):
        # Transfer all three fields into `other` (expr unconditionally,
        # unlike rule.splice_to); converters unused here.
        other.append_name(self.name)
        other.extend_args(self.args)
        other.append_expr(self.expr)
        return other

    def to_dict(self):
        return {
            '__type__': 'macro',
            'name': self.name.to_dict(),
            'args': [i.to_dict() for i in self.args],
            'expr': self.expr.to_dict(),
        }
class grammar:
    """Root node: a grammar is an ordered list of ``rules``."""
    __slots__ = ('rules',)

    def __init__(self, rules=None):
        # A falsy argument (None or []) yields a fresh empty list.
        self.rules = rules or []

    def __repr__(self):
        return 'grammar(%r)' % (self.rules,)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.rules == other.rules

    def unapply1(self):
        return self.rules

    def unapply(self):
        return (self.rules,)

    def copy(self):
        # Copies the list itself; the rule objects inside are shared.
        return grammar(list(self.rules))

    def append_rules(self, val):
        self.rules.append(val)
        return self

    def extend_rules(self, val):
        self.rules.extend(val)
        return self

    def splice_to(self, other, converters):
        # Merge our rules into `other`; converters unused here.
        other.extend_rules(self.rules)
        return other

    def to_dict(self):
        return {
            '__type__': 'grammar',
            'rules': [i.to_dict() for i in self.rules],
        }
# Registry mapping each node's serialized '__type__' tag (as emitted by its
# to_dict method) back to the node class, so serialized trees can be
# reconstructed by tag lookup.
types_map = {
    'char': char,
    'octal': octal,
    'string': string,
    'reference': reference,
    'rule': rule,
    'fail': fail,
    'char_any': char_any,
    'char_range': char_range,
    'append': append,
    'top': top,
    'splice': splice,
    'top_splice': top_splice,
    'ignore': ignore,
    'node': node,
    'optional': optional,
    'repeat': repeat,
    'repeat1': repeat1,
    'replace': replace,
    'follow': follow,
    'not_follow': not_follow,
    'choice': choice,
    'sequence': sequence,
    'expand': expand,
    'macro': macro,
    'grammar': grammar,
}
| 20.78329
| 74
| 0.534673
| 2,746
| 23,880
| 4.323015
| 0.025127
| 0.115407
| 0.067223
| 0.048522
| 0.870693
| 0.831354
| 0.811389
| 0.783422
| 0.769775
| 0.765732
| 0
| 0.001993
| 0.348576
| 23,880
| 1,149
| 75
| 20.78329
| 0.761121
| 0
| 0
| 0.70814
| 0
| 0
| 0.04782
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.286047
| false
| 0
| 0
| 0.190698
| 0.601163
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
12491375bd7a509be67c89ded4d8f384ea4a59bc
| 80,550
|
py
|
Python
|
utils/CoDetModel.py
|
ai4ce/DiscoNet
|
44b57faac3c5be289d33cbbab12b300e3ac767b0
|
[
"MIT"
] | 80
|
2021-10-24T00:56:14.000Z
|
2022-03-22T18:11:40.000Z
|
utils/CoDetModel.py
|
ai4ce/DiscoNet
|
44b57faac3c5be289d33cbbab12b300e3ac767b0
|
[
"MIT"
] | 1
|
2021-11-18T16:04:38.000Z
|
2021-11-20T22:23:58.000Z
|
utils/CoDetModel.py
|
ai4ce/DiscoNet
|
44b57faac3c5be289d33cbbab12b300e3ac767b0
|
[
"MIT"
] | 12
|
2021-11-01T11:29:14.000Z
|
2022-03-28T16:22:38.000Z
|
'''
/************************************************************************
MIT License
Copyright (c) 2021 AI4CE Lab@NYU, MediaBrain Group@SJTU
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*************************************************************************/
/**
* @file CoDetModel.py
* @author YIMING LI (https://roboticsyimingli.github.io/)
* @date 10/10/2021
* @version 1.0
*
* @brief Co-det Models of Collaborative BEV Detection
*
* @section DESCRIPTION
*
* This is official implementation for: NeurIPS 2021 Learning Distilled Collaboration Graph for Multi-Agent Perception
*
*/
'''
import torch.nn.functional as F
import torch.nn as nn
import torch
from utils.model import *
import numpy as np
import copy
import torchgeometry as tgm
import random
import convolutional_rnn as convrnn
class DiscoNet(nn.Module):
    """DiscoNet collaborative BEV detector.

    Up to ``agent_num`` agents each encode their own BEV; the encoder stage
    selected by ``layer`` is warped into the ego agent's frame, fused with
    learned per-pixel weights (PixelWeightedFusionSoftmax), and the fused
    map is decoded into classification / box-regression outputs.
    """

    def __init__(self, config, layer=3, in_channels=13, kd_flag=True):
        """
        config: project config (motion_state, only_det, pred_len, ...).
        layer: encoder stage whose features are exchanged between agents.
        in_channels: BEV height-slice channel count fed to the encoder.
        kd_flag: when truthy, forward() also returns decoder intermediates
            for knowledge distillation.
        """
        super(DiscoNet, self).__init__()
        self.motion_state = config.motion_state
        if config.only_det:
            # Detection-only: predict a single frame.
            self.out_seq_len = 1
        else:
            self.out_seq_len = config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.u_encoder = lidar_encoder(height_feat_size=in_channels)
        self.agent_num = 5
        self.kd_flag = kd_flag
        self.layer = layer
        self.ModulationLayer3 = ModulationLayer3(config)
        # Fusion net consumes [ego ; neighbor] concatenation, i.e. 2x the
        # stage's channel width (256 -> stage-3 has 128 channels? presumably
        # the fusion module halves internally -- TODO confirm in utils.model).
        # NOTE(review): only layers 3 and 2 get a fusion module, though
        # forward() also accepts layer 0/1/4 -- confirm intended.
        if self.layer ==3:
            self.PixelWeightedFusion = PixelWeightedFusionSoftmax(256)
        elif self.layer ==2:
            self.PixelWeightedFusion = PixelWeightedFusionSoftmax(128)
        # Detection decoder
        self.decoder = lidar_decoder(height_feat_size=in_channels)

    def agents2batch(self, feats):
        """Fold the agent axis into the batch axis: (B, A, C, H, W) -> (B*A, C, H, W)."""
        agent_num = feats.shape[1]
        feat_list = []
        for i in range(agent_num):
            feat_list.append(feats[:, i, :, :, :])
        feat_mat = torch.cat(tuple(feat_list), 0)
        return feat_mat

    def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
        """Encode -> warp & fuse across agents -> decode -> prediction heads.

        bevs: stacked per-agent BEVs, permuted below to (Batch, seq, z, h, w).
        trans_matrices: pairwise 4x4 transforms; trans_matrices[b, i][j] maps
            agent j into agent i's frame.
        num_agent_tensor: [batch, agent] count of non-empty agents per scene.
        Returns {'loc', 'cls'} (plus 'state' when motion_state); with kd_flag
        also returns decoder intermediates and the fused feature map.
        """
        bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
        x_0, x_1, x_2, x_3, x_4 = self.u_encoder(bevs)
        device = bevs.device
        # Pick the encoder stage to communicate; `size` is the (1, C, H, W)
        # shape used to build affine_grid sampling grids.
        if self.layer ==4:
            feat_maps = x_4
            size = (1, 512, 16, 16)
        elif self.layer ==3:
            feat_maps = x_3
            size = (1, 256, 32, 32)
        elif self.layer == 2:
            feat_maps = x_2
            size = (1, 128, 64, 64)
        elif self.layer == 1:
            feat_maps = x_1
            size = (1, 64, 128, 128)
        elif self.layer == 0:
            feat_maps = x_0
            size = (1, 32, 256, 256)
        # print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
        # get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
        feat_map = {}
        feat_list = []
        for i in range(self.agent_num):
            feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
            feat_list.append(feat_map[i])
        local_com_mat = torch.cat(tuple(feat_list), 1)  # [2 5 512 16 16] [batch, agent, channel, height, width]
        local_com_mat_update = torch.cat(tuple(feat_list), 1)  # to avoid the inplace operation
        save_agent_weight_list = list()
        # Outage probabilities [P(link up), P(link down)]; with [1.0, 0.0]
        # a communication outage is never sampled.
        p = np.array([1.0, 0.0])
        for b in range(batch_size):
            num_agent = num_agent_tensor[b, 0]
            for i in range(num_agent):
                tg_agent = local_com_mat[b, i]  # ego (target) agent's features
                all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
                neighbor_feat_list = list()
                neighbor_feat_list.append(tg_agent)  # slot 0 is the ego itself
                # com_outage = random.randint(0,1)
                p_com_outage = np.random.choice([0, 1], p=p.ravel())
                if p_com_outage == 1:
                    # Link down: fall back to the ego's own features.
                    agent_wise_weight_feat = neighbor_feat_list[0]
                else:
                    for j in range(num_agent):
                        if j != i:
                            nb_agent = torch.unsqueeze(local_com_mat[b, j], 0)  # [1 512 16 16]
                            nb_warp = all_warp[j]  # [4 4]
                            # normalize the translation vector
                            # (factor 4/128 presumably ties feature stride to
                            # the metric map extent -- TODO confirm)
                            x_trans = (4 * nb_warp[0, 3]) / 128
                            y_trans = -(4 * nb_warp[1, 3]) / 128
                            theta_rot = torch.tensor([[nb_warp[0, 0], nb_warp[0, 1], 0.0], [nb_warp[1, 0], nb_warp[1, 1], 0.0]]).type(dtype=torch.float).to(device)
                            theta_rot = torch.unsqueeze(theta_rot, 0)
                            grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # for grid sample
                            theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                            theta_trans = torch.unsqueeze(theta_trans, 0)
                            grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # for grid sample
                            # first rotate the feature map, then translate it
                            warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                            warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                            warp_feat = torch.squeeze(warp_feat_trans)
                            neighbor_feat_list.append(warp_feat)
                    # agent-wise weighted fusion: exp-weights normalized below
                    # (a softmax computed manually across agents)
                    tmp_agent_weight_list = list()
                    sum_weight = 0
                    for k in range(num_agent):
                        cat_feat = torch.cat([tg_agent, neighbor_feat_list[k]], dim=0)
                        cat_feat = cat_feat.unsqueeze(0)
                        AgentWeight = torch.squeeze(self.PixelWeightedFusion(cat_feat))
                        tmp_agent_weight_list.append(torch.exp(AgentWeight))
                        sum_weight = sum_weight + torch.exp(AgentWeight)
                    agent_weight_list = list()
                    for k in range(num_agent):
                        AgentWeight = torch.div(tmp_agent_weight_list[k], sum_weight)
                        # NOTE(review): expand() is not in-place and its result
                        # is discarded here; broadcasting in the product below
                        # makes this work anyway -- confirm the line is vestigial.
                        AgentWeight.expand([256, -1, -1])
                        agent_weight_list.append(AgentWeight)
                    agent_wise_weight_feat = 0
                    for k in range(num_agent):
                        agent_wise_weight_feat = agent_wise_weight_feat + agent_weight_list[k] * neighbor_feat_list[k]
                # feature update
                local_com_mat_update[b, i] = agent_wise_weight_feat
                # save_agent_weight_list.append(agent_weight_list)
        # weighted feature maps is passed to decoder
        feat_fuse_mat = self.agents2batch(local_com_mat_update)
        if self.kd_flag == 1:
            # Substitute the fused map at the communicated stage and keep the
            # decoder intermediates (x_5..x_8) for knowledge distillation.
            if self.layer ==4:
                x_8, x_7, x_6, x_5 = self.decoder(x_0, x_1, x_2, x_3, feat_fuse_mat, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 3:
                x_8, x_7, x_6, x_5 = self.decoder(x_0, x_1, x_2, feat_fuse_mat, x_4, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 2:
                x_8, x_7, x_6, x_5 = self.decoder(x_0, x_1, feat_fuse_mat, x_3, x_4, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 1:
                x_8, x_7, x_6, x_5 = self.decoder(x_0, feat_fuse_mat, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 0:
                x_8, x_7, x_6, x_5 = self.decoder(feat_fuse_mat, x_1, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)
            x = x_8
        else:
            if self.layer ==4:
                x = self.decoder(x_0, x_1, x_2, x_3, feat_fuse_mat, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 3:
                x = self.decoder(x_0, x_1, x_2, feat_fuse_mat, x_4, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 2:
                x = self.decoder(x_0, x_1, feat_fuse_mat, x_3, x_4, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 1:
                x = self.decoder(x_0, feat_fuse_mat, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)
            elif self.layer == 0:
                x = self.decoder(feat_fuse_mat, x_1, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)
        # vis = vis.permute(0, 3, 1, 2)
        # if not maps is None:
        #     x = torch.cat([x,maps],axis=-1)
        # if not vis is None:
        #     x = torch.cat([x,vis],axis=1)
        # Cell Classification head
        cls_preds = self.classification(x)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.view(cls_preds.shape[0], -1, self.category_num)
        # Detection head
        loc_preds = self.regression(x)
        loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
        loc_preds = loc_preds.view(-1, loc_preds.size(1), loc_preds.size(2), self.anchor_num_per_loc, self.out_seq_len, self.box_code_size)
        # loc_pred (N * T * W * H * loc)
        result = {'loc': loc_preds,
                  'cls': cls_preds}
        # MotionState head
        if self.motion_state:
            motion_cat = 3
            # NOTE(review): self.motion_cls is never created in this class's
            # __init__ (unlike When2com) -- this branch would raise
            # AttributeError; confirm motion_state is always False here.
            motion_cls_preds = self.motion_cls(x)
            motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
            motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0], -1, motion_cat)
            result['state'] = motion_cls_preds
        if self.kd_flag == 1:
            return result, x_8, x_7, x_6, x_5, feat_fuse_mat
        else:
            return result
class V2VNet(nn.Module):
    """V2VNet baseline: agents exchange warped features and refine them with
    a ConvGRU over several GNN message-passing iterations before decoding."""

    def __init__(self, config, gnn_iter_times, layer, layer_channel, in_channels=13):
        """
        config: project config (motion_state, only_det, pred_len, ...).
        gnn_iter_times: number of message-passing rounds.
        layer: encoder stage whose features are exchanged.
        layer_channel: channel width of that stage (sizes the ConvGRU).
        in_channels: BEV height-slice channel count fed to the encoder.
        """
        super(V2VNet, self).__init__()
        self.motion_state = config.motion_state
        if config.only_det:
            # Detection-only: predict a single frame.
            self.out_seq_len = 1
        else:
            self.out_seq_len = config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.u_encoder = lidar_encoder(height_feat_size=in_channels)
        self.agent_num = 5
        self.layer = layer
        self.layer_channel = layer_channel
        # Detection decoder
        self.decoder = lidar_decoder(height_feat_size=in_channels)
        self.gnn_iter_num = gnn_iter_times
        # GRU input is [own features ; mean neighbor features], hence 2x channels.
        self.convgru = convrnn.Conv2dGRU(in_channels=self.layer_channel * 2,
                                         out_channels=self.layer_channel,
                                         kernel_size=3,
                                         num_layers=1,
                                         bidirectional=False,
                                         dilation=1,
                                         stride=1)

    def agents2batch(self, feats):
        """Fold the agent axis into the batch axis: (B, A, C, H, W) -> (B*A, C, H, W)."""
        agent_num = feats.shape[1]
        feat_list = []
        for i in range(agent_num):
            feat_list.append(feats[:, i, :, :, :])
        feat_mat = torch.cat(tuple(feat_list), 0)
        return feat_mat

    def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
        """Encode -> iterative GNN fusion (warp + mean + ConvGRU) -> decode.

        trans_matrices: [batch 5 5 4 4] pairwise transforms.
        num_agent_tensor: [batch, num_agent]; how many non-empty agents per scene.
        Returns {'loc': ..., 'cls': ...}.
        """
        bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
        x_0, x_1, x_2, x_3, x_4 = self.u_encoder(bevs)
        device = bevs.device
        # Pick the encoder stage to communicate; `size` is the (1, C, H, W)
        # shape used to build affine_grid sampling grids.
        if self.layer ==4:
            feat_maps = x_4
            size = (1, 512, 16, 16)
        elif self.layer ==3:
            feat_maps = x_3
            size = (1, 256, 32, 32)
        elif self.layer == 2:
            feat_maps = x_2
            size = (1, 128, 64, 64)
        elif self.layer == 1:
            feat_maps = x_1
            size = (1, 64, 128, 128)
        elif self.layer == 0:
            feat_maps = x_0
            size = (1, 32, 256, 256)
        # get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
        feat_map = {}
        feat_list = []
        for i in range(self.agent_num):
            feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
            feat_list.append(feat_map[i])
        local_com_mat = torch.cat(tuple(feat_list), 1)  # [2 5 512 16 16] [batch, agent, channel, height, width]
        local_com_mat_update = torch.cat(tuple(feat_list), 1)  # to avoid the inplace operation
        # Outage probabilities [P(link up), P(link down)]; with [1.0, 0.0]
        # a communication outage is never sampled.
        p = np.array([1.0, 0.0])
        for b in range(batch_size):
            num_agent = num_agent_tensor[b, 0]
            agent_feat_list = list()
            for nb in range(self.agent_num):  # self.agent_num = 5
                agent_feat_list.append(local_com_mat[b, nb])
            for _ in range(self.gnn_iter_num):
                # One message-passing round: every agent aggregates warped
                # neighbor features and updates its own state via the ConvGRU.
                updated_feats_list = list()
                for i in range(num_agent):
                    neighbor_feat_list = list()
                    all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
                    com_outage = np.random.choice([0, 1], p=p.ravel())
                    if com_outage == 0:
                        for j in range(num_agent):
                            if j != i:
                                nb_agent = torch.unsqueeze(agent_feat_list[j], 0)  # [1 512 16 16]
                                nb_warp = all_warp[j]  # [4 4]
                                # normalize the translation vector
                                # (factor 4/128 presumably ties feature stride
                                # to the metric map extent -- TODO confirm)
                                x_trans = (4 * nb_warp[0, 3]) / 128
                                y_trans = -(4 * nb_warp[1, 3]) / 128
                                theta_rot = torch.tensor([[nb_warp[0, 0], nb_warp[0, 1], 0.0], [nb_warp[1, 0], nb_warp[1, 1], 0.0]]).type(dtype=torch.float).to(device)
                                theta_rot = torch.unsqueeze(theta_rot, 0)
                                grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # build grid for grid_sample
                                theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                                theta_trans = torch.unsqueeze(theta_trans, 0)
                                grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # build grid for grid_sample
                                # first rotate the feature map, then translate it
                                warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                                warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                                warp_feat = torch.squeeze(warp_feat_trans)
                                neighbor_feat_list.append(warp_feat)
                        # NOTE(review): if num_agent == 1 the neighbor list is
                        # empty and torch.stack would fail -- confirm callers
                        # guarantee at least two agents.
                        mean_feat = torch.mean(torch.stack(neighbor_feat_list), dim=0)  # [c, h, w]
                        cat_feat = torch.cat([agent_feat_list[i], mean_feat], dim=0)
                        cat_feat = cat_feat.unsqueeze(0).unsqueeze(0)  # [1, 1, c, h, w]
                        updated_feat, _ = self.convgru(cat_feat, None)
                        updated_feat = torch.squeeze(torch.squeeze(updated_feat, 0), 0)  # [c, h, w]
                        updated_feats_list.append(updated_feat)
                    else:
                        # Link down: carry the agent's current state forward.
                        updated_feats_list.append(agent_feat_list[i])
                agent_feat_list = updated_feats_list
            for k in range(num_agent):
                local_com_mat_update[b, k] = agent_feat_list[k]
        # weighted feature maps is passed to decoder
        feat_fuse_mat = self.agents2batch(local_com_mat_update)
        if self.layer ==4:
            x = self.decoder(x_0, x_1, x_2, x_3, feat_fuse_mat, batch_size)
        elif self.layer == 3:
            x = self.decoder(x_0, x_1, x_2, feat_fuse_mat, x_4, batch_size)
        elif self.layer == 2:
            x = self.decoder(x_0, x_1, feat_fuse_mat, x_3, x_4, batch_size)
        elif self.layer == 1:
            x = self.decoder(x_0, feat_fuse_mat, x_2, x_3, x_4, batch_size)
        elif self.layer == 0:
            x = self.decoder(feat_fuse_mat, x_1, x_2, x_3, x_4, batch_size)
        # vis = vis.permute(0, 3, 1, 2)
        # if not maps is None:
        #     x = torch.cat([x,maps],axis=-1)
        # if not vis is None:
        #     x = torch.cat([x,vis],axis=1)
        # Cell Classification head
        cls_preds = self.classification(x)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.view(cls_preds.shape[0], -1, self.category_num)
        # Detection head
        loc_preds = self.regression(x)
        loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
        loc_preds = loc_preds.view(-1, loc_preds.size(1), loc_preds.size(2), self.anchor_num_per_loc, self.out_seq_len, self.box_code_size)
        # loc_pred (N * T * W * H * loc)
        result = {'loc': loc_preds,
                  'cls': cls_preds}
        return result
class When2com(nn.Module):
def __init__(self, config, n_classes=21, in_channels=13, feat_channel=512, feat_squeezer=-1, attention='additive',
             has_query=True, sparse=False, layer=3, warp_flag=1, image_size=512,
             shared_img_encoder='unified', key_size=1024, query_size=32):
    """Build the When2com communication baseline.

    config: project config (motion_state, only_det, pred_len, ...).
    layer: encoder stage whose features are exchanged.
    warp_flag: 1 -> pre-warped per-pair value matrix is used.
    shared_img_encoder: 'unified' shares encoder/decoder weights across agents.
    key_size / query_size: dimensions of the attention key/query vectors.
    n_classes, feat_channel, feat_squeezer, attention, image_size: kept for
        interface compatibility; several are unused in this constructor.
    """
    super(When2com, self).__init__()
    self.motion_state = config.motion_state
    if config.only_det:
        # Detection-only: predict a single frame.
        self.out_seq_len = 1
    else:
        self.out_seq_len = config.pred_len
    self.box_code_size = config.box_code_size
    self.category_num = config.category_num
    self.use_map = config.use_map
    self.anchor_num_per_loc = len(config.anchor_size)
    self.classification = ClassificationHead(config)
    self.regression = SingleRegressionHead(config)
    self.sparse = sparse
    self.u_encoder = lidar_encoder(height_feat_size=in_channels)
    self.agent_num = 5
    self.key_size = key_size
    self.query_size = query_size
    self.shared_img_encoder = shared_img_encoder
    self.has_query = has_query
    self.warp_flag = warp_flag
    self.layer = layer
    # Key/query generators operate on the encoder's 1/32-resolution features.
    self.key_net = km_generator(out_size=self.key_size, input_feat_sz=image_size / 32)
    self.attention_net = MIMOGeneralDotProductAttention(self.query_size, self.key_size, self.warp_flag)
    # # Message generator
    self.query_key_net = policy_net4(in_channels=in_channels)
    if self.has_query:
        self.query_net = km_generator(out_size=self.query_size, input_feat_sz=image_size / 32)
    # Detection decoder
    self.decoder = lidar_decoder(height_feat_size=in_channels)
    # List the parameters of each modules (exposed so training code can
    # build separate optimizers for perception vs. communication policy).
    self.attention_paras = list(self.attention_net.parameters())
    # NOTE(review): img_net_paras is only defined when shared_img_encoder is
    # 'unified', yet it is read unconditionally below -- any other setting
    # would raise AttributeError; confirm 'unified' is the only mode used.
    if self.shared_img_encoder == 'unified':
        self.img_net_paras = list(self.u_encoder.parameters()) + list(self.decoder.parameters())
    self.policy_net_paras = list(self.query_key_net.parameters()) + list(
        self.key_net.parameters()) + self.attention_paras
    if self.has_query:
        self.policy_net_paras = self.policy_net_paras + list(self.query_net.parameters())
    self.all_paras = self.img_net_paras + self.policy_net_paras
    if self.motion_state:
        self.motion_cls = MotionStateHead(config)
def argmax_select(self, warp_flag, val_mat, prob_action):
# v(batch, query_num, channel, size, size)
cls_num = prob_action.shape[1]
coef_argmax = F.one_hot(prob_action.max(dim=1)[1], num_classes=cls_num).type(torch.cuda.FloatTensor)
coef_argmax = coef_argmax.transpose(1, 2)
attn_shape = coef_argmax.shape
bats, key_num, query_num = attn_shape[0], attn_shape[1], attn_shape[2]
coef_argmax_exp = coef_argmax.view(bats, key_num, query_num, 1, 1, 1)
if warp_flag==1:
v_exp = val_mat
else:
v_exp = torch.unsqueeze(val_mat, 2)
v_exp = v_exp.expand(-1, -1, query_num, -1, -1, -1)
output = coef_argmax_exp * v_exp # (batch,4,channel,size,size)
feat_argmax = output.sum(1) # (batch,1,channel,size,size)
# compute connect
count_coef = copy.deepcopy(coef_argmax)
ind = np.diag_indices(self.agent_num)
count_coef[:, ind[0], ind[1]] = 0
num_connect = torch.nonzero(count_coef).shape[0] / (self.agent_num * count_coef.shape[0])
return feat_argmax, coef_argmax, num_connect
def activated_select(self, warp_flag, val_mat, prob_action, thres=0.2):
coef_act = torch.mul(prob_action, (prob_action > thres).float())
attn_shape = coef_act.shape
bats, key_num, query_num = attn_shape[0], attn_shape[1], attn_shape[2]
coef_act_exp = coef_act.view(bats, key_num, query_num, 1, 1, 1)
if warp_flag==1:
v_exp = val_mat
else:
v_exp = torch.unsqueeze(val_mat, 2)
v_exp = v_exp.expand(-1, -1, query_num, -1, -1, -1)
output = coef_act_exp * v_exp # (batch,4,channel,size,size)
feat_act = output.sum(1) # (batch,1,channel,size,size)
# compute connect
count_coef = coef_act.clone()
ind = np.diag_indices(self.agent_num)
count_coef[:, ind[0], ind[1]] = 0
num_connect = torch.nonzero(count_coef).shape[0] / (self.agent_num * count_coef.shape[0])
return feat_act, coef_act, num_connect
def agents2batch(self, feats):
    """Fold the agent axis of ``feats`` into the batch axis.

    feats: (batch, agent, C, H, W) -> (batch * agent, C, H, W), ordered
    agent-major (all batches of agent 0, then agent 1, ...).
    """
    per_agent = [feats[:, idx, :, :, :] for idx in range(feats.shape[1])]
    return torch.cat(tuple(per_agent), 0)
def forward(self, bevs, trans_matrices, num_agent_tensor, maps=None, vis=None, training=True, MO_flag=True, inference='activated', batch_size=1):
    """Attention-based collaborative detection forward pass.

    Encodes each agent's BEV input, builds a (optionally warped) value
    matrix of agent features, lets the attention net decide what to
    fuse, decodes the fused map, and runs the detection heads.

    Args:
        bevs: BEV input, permuted below to (Batch, seq, z, h, w);
            batch axis stacks batch_size samples per agent.
        trans_matrices: pairwise transforms; trans_matrices[b, i, j] is
            the 4x4 matrix warping agent j into agent i's frame —
            presumably; verify against the caller.
        num_agent_tensor: active-agent counts; only [b, 0] is read, so
            one count per sample is assumed — TODO confirm.
        maps, vis: unused here (kept for interface compatibility; see
            commented-out concatenation below).
        training: when True, fuse with the soft attention output.
        MO_flag: when False only agent 0's query is used (single-query
            "who2com"-style mode).
        inference: eval-time selection mode, one of
            'softmax' | 'argmax_test' | 'activated'.
        batch_size: number of samples per agent in the stacked batch.

    Returns:
        dict with 'loc' and 'cls' predictions (plus 'state' when
        self.motion_state).  NOTE(review): `action` and `num_connect`
        are computed but not returned — the original returns are
        commented out below.
    """
    bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
    # vis = vis.permute(0, 3, 1, 2)
    # pass encoder
    x,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
    device = bevs.device
    # Pick the encoder level to communicate at.  `size` is the
    # (N, C, H, W) shape used for the affine sampling grids; val_mat
    # holds one warped value map per (receiver, sender) pair, with 5
    # the maximum number of agents.
    if self.layer ==4:
        feat_maps = x_4
        if self.warp_flag:
            size = (1, 512, 16, 16)
            val_mat = torch.zeros(batch_size, 5, 5, 512, 16, 16).to(device)
    elif self.layer ==3:
        feat_maps = x_3
        if self.warp_flag:
            size = (1, 256, 32, 32)
            val_mat = torch.zeros(batch_size, 5, 5, 256, 32, 32).to(device)
    elif self.layer == 2:
        feat_maps = x_2
        if self.warp_flag:
            size = (1, 128, 64, 64)
            val_mat = torch.zeros(batch_size, 5, 5, 128, 64, 64).to(device)
    # get feat maps for each agent: slice the stacked batch back into
    # per-agent chunks of batch_size samples each
    feat_map = {}
    feat_list = []
    for i in range(self.agent_num):
        feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
        feat_list.append(feat_map[i])
    # ---------------------------------------------------------
    # generate value matrix for each agent, Yiming, 2021.4.22
    # ---------------------------------------------------------
    if self.warp_flag==1:
        local_com_mat = torch.cat(tuple(feat_list), 1)  # [2 5 512 16 16] [batch, agent, channel, height, width]
        for b in range(batch_size):
            num_agent = num_agent_tensor[b, 0]
            for i in range(num_agent):
                tg_agent = local_com_mat[b, i]
                all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
                for j in range(num_agent):
                    if j==i:
                        # The receiver's own map needs no warping.
                        val_mat[b, i, j] = tg_agent
                    else:
                        nb_agent = torch.unsqueeze(local_com_mat[b, j], 0)  # [1 512 16 16]
                        nb_warp = all_warp[j]  # [4 4]
                        # normalize the translation vector into the
                        # [-1, 1] range affine_grid expects
                        x_trans = (4*nb_warp[0, 3])/128
                        y_trans = -(4*nb_warp[1, 3])/128
                        theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
                        theta_rot = torch.unsqueeze(theta_rot, 0)
                        grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # build sampling grid for grid_sample
                        theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                        theta_trans = torch.unsqueeze(theta_trans, 0)
                        grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # build sampling grid for grid_sample
                        # first rotate the feature map, then translate it
                        warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                        warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                        warp_feat = torch.squeeze(warp_feat_trans)
                        val_mat[b, i, j] = warp_feat
    else:
        val_mat = torch.cat(tuple(feat_list), 1)
    # pass feature maps through key and query generator
    query_key_maps = self.query_key_net(bevs)
    keys = self.key_net(query_key_maps)
    if self.has_query:
        querys = self.query_net(query_key_maps)
    # get key and query, sliced per agent like the feature maps above
    key = {}
    query = {}
    key_list = []
    query_list = []
    for i in range(self.agent_num):
        key[i] = torch.unsqueeze(keys[batch_size * i:batch_size * (i + 1)], 1)
        key_list.append(key[i])
        if self.has_query:
            query[i] = torch.unsqueeze(querys[batch_size * i:batch_size * (i + 1)], 1)
        else:
            # NOTE(review): hard-coded 'cuda' — breaks CPU-only runs;
            # consider .to(device).
            query[i] = torch.ones(batch_size, 1, self.query_size).to('cuda')
        query_list.append(query[i])
    key_mat = torch.cat(tuple(key_list), 1)
    query_mat = torch.cat(tuple(query_list), 1)
    if MO_flag:
        query_mat = query_mat
    else:
        # Single-query mode: only agent 0 requests information.
        query_mat = torch.unsqueeze(query_mat[:,0,:],1)
    feat_fuse, prob_action = self.attention_net(query_mat, key_mat, val_mat, sparse=self.sparse)
    #print(query_mat.shape, key_mat.shape, val_mat.shape, feat_fuse.shape)
    # weighted feature maps is passed to decoder; feat_fuse_mat replaces
    # the encoder activation at self.layer
    feat_fuse_mat = self.agents2batch(feat_fuse)
    if self.layer ==4:
        x = self.decoder(x,x_1,x_2,x_3,feat_fuse_mat,batch_size)
    elif self.layer ==3:
        x = self.decoder(x,x_1,x_2,feat_fuse_mat,x_4,batch_size)
    elif self.layer == 2:
        x = self.decoder(x,x_1,feat_fuse_mat,x_3,x_4,batch_size)
    # not related to how we combine the feature (prefer to use the agnets' own frames: to reduce the bandwidth)
    # Tiny diagonal bias so ties break toward the agent's own map.
    small_bis = torch.eye(prob_action.shape[1])*0.001
    small_bis = small_bis.reshape((1, prob_action.shape[1], prob_action.shape[2]))
    # NOTE(review): hard-coded .cuda() — breaks CPU-only runs.
    small_bis = small_bis.repeat(prob_action.shape[0], 1, 1).cuda()
    prob_action = prob_action + small_bis
    if training:
        action = torch.argmax(prob_action, dim=1)
        num_connect = self.agent_num - 1
    else:
        if inference == 'softmax':
            # Same soft fusion as training; full connectivity assumed.
            action = torch.argmax(prob_action, dim=1)
            num_connect = self.agent_num - 1
        elif inference == 'argmax_test':
            print('argmax_test')
            feat_argmax, connect_mat, num_connect = self.argmax_select(self.warp_flag, val_mat, prob_action)
            feat_argmax_mat = self.agents2batch(feat_argmax)  # (batchsize*agent_num, channel, size, size)
            feat_argmax_mat = feat_argmax_mat.detach()
            # NOTE(review): this argument order matches layer == 3 only
            # (compare the per-layer branches above and in 'activated');
            # other layers look unsupported here — confirm.
            pred_argmax = self.decoder(x, x_1, x_2, feat_argmax_mat, x_4, batch_size)
            action = torch.argmax(connect_mat, dim=1)
            #return pred_argmax, prob_action, action, num_connect
            x=pred_argmax
        elif inference == 'activated':
            print('activated')
            feat_act, connect_mat, num_connect = self.activated_select(self.warp_flag, val_mat, prob_action)
            feat_act_mat = self.agents2batch(feat_act)  # (batchsize*agent_num, channel, size, size)
            feat_act_mat = feat_act_mat.detach()
            if self.layer ==4:
                pred_act = self.decoder(x, x_1, x_2, x_3, feat_act_mat,batch_size)
            elif self.layer == 3:
                pred_act = self.decoder(x, x_1, x_2, feat_act_mat, x_4, batch_size)
            elif self.layer == 2:
                pred_act = self.decoder(x, x_1, feat_act_mat, x_3, x_4, batch_size)
            action = torch.argmax(connect_mat, dim=1)
            #return pred_act, prob_action, action, num_connect
            x=pred_act
        else:
            raise ValueError('Incorrect inference mode')
    # if not maps is None:
    #     x = torch.cat([x,maps],axis=-1)
    # if not vis is None:
    #     x = torch.cat([x,vis],axis=1)
    # Cell Classification head
    cls_preds = self.classification(x)
    cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
    cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
    # Detection head
    loc_preds =self.regression(x)
    loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
    loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
    #loc_pred (N * T * W * H * loc)
    result = {'loc': loc_preds,
              'cls': cls_preds}
    #MotionState head
    if self.motion_state:
        motion_cat = 3
        motion_cls_preds = self.motion_cls(x)
        motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
        motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
        result['state'] = motion_cls_preds
    return result
class SumFusion(nn.Module):
    """Collaborative detector fusing warped neighbour features by summation.

    Each of up to 5 agents encodes its own BEV input; neighbour feature
    maps are warped into the ego frame using the pairwise transform
    matrices, summed element-wise with the ego feature, and the fused
    map is decoded and passed to the detection heads.
    """
    def __init__(self, config, layer=3, in_channels=13, kd_flag=True):
        super(SumFusion, self).__init__()
        self.motion_state = config.motion_state
        if config.only_det:
            # Detection-only mode predicts a single output frame.
            self.out_seq_len = 1
        else:
            self.out_seq_len = config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.u_encoder = lidar_encoder(height_feat_size=in_channels)
        self.agent_num = 5  # maximum number of collaborating agents
        self.layer = layer  # encoder level at which features are fused
        self.kd_flag = kd_flag  # truthy: forward also returns decoder intermediates
        # Detection decoder
        self.decoder = lidar_decoder(height_feat_size=in_channels)
    def agents2batch(self, feats):
        """Flatten (batch, agent, C, H, W) into (agent * batch, C, H, W)."""
        agent_num = feats.shape[1]
        feat_list = []
        for i in range(agent_num):
            feat_list.append(feats[:, i, :, :, :])
        feat_mat = torch.cat(tuple(feat_list), 0)
        return feat_mat
    def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
        """Encode -> warp neighbours -> sum-fuse -> decode -> heads.

        Args:
            bevs: BEV input, permuted below to (Batch, seq, z, h, w);
                batch axis stacks batch_size samples per agent.
            trans_matrices: trans_matrices[b, i, j] warps agent j into
                agent i's frame (4x4) — presumably; verify with caller.
            num_agent_tensor: active-agent counts; only [b, 0] is read.
            batch_size: samples per agent in the stacked batch.

        Returns:
            result dict with 'loc' and 'cls' (plus 'state' when
            motion_state); when kd_flag == 1 also the decoder
            intermediates and fused feature map for distillation.
        """
        bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
        x_0,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
        device = bevs.device
        # Pick the fusion level; `size` is the (N, C, H, W) shape used
        # to build the affine sampling grids.
        if self.layer ==4:
            feat_maps = x_4
            size = (1, 512, 16, 16)
        elif self.layer ==3:
            feat_maps = x_3
            size = (1, 256, 32, 32)
        elif self.layer == 2:
            feat_maps = x_2
            size = (1, 128, 64, 64)
        elif self.layer == 1:
            feat_maps = x_1
            size = (1, 64, 128, 128)
        elif self.layer == 0:
            feat_maps = x_0
            size = (1, 32, 256, 256)
        # print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
        # get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
        feat_map = {}
        feat_list = []
        for i in range(self.agent_num):
            feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
            feat_list.append(feat_map[i])
        local_com_mat = torch.cat(tuple(feat_list), 1)  # [2 5 512 16 16] [batch, agent, channel, height, width]
        local_com_mat_update = torch.cat(tuple(feat_list), 1)  # to avoid the inplace operation
        for b in range(batch_size):
            num_agent = num_agent_tensor[b, 0]
            for i in range(num_agent):
                tg_agent = local_com_mat[b, i]
                all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
                neighbor_feat_list = list()
                neighbor_feat_list.append(tg_agent)
                for j in range(num_agent):
                    if j != i:
                        nb_agent = torch.unsqueeze(local_com_mat[b, j], 0)  # [1 512 16 16]
                        nb_warp = all_warp[j]  # [4 4]
                        # normalize the translation vector
                        x_trans = (4*nb_warp[0, 3])/128
                        y_trans = -(4*nb_warp[1, 3])/128
                        theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
                        theta_rot = torch.unsqueeze(theta_rot, 0)
                        grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # build sampling grid for grid_sample
                        theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                        theta_trans = torch.unsqueeze(theta_trans, 0)
                        grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # build sampling grid for grid_sample
                        # first rotate the feature map, then translate it
                        warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                        warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                        warp_feat = torch.squeeze(warp_feat_trans)
                        neighbor_feat_list.append(warp_feat)
                # sum fusion over ego + warped neighbour features
                sum_feat = torch.sum(torch.stack(neighbor_feat_list), dim=0)  # [c, h, w]
                # feature update
                local_com_mat_update[b, i] = sum_feat
        # fused feature maps are passed to the decoder, replacing the
        # encoder activation at self.layer
        feat_fuse_mat = self.agents2batch(local_com_mat_update)
        if self.kd_flag == 1:
            # Distillation path: decoder also returns intermediates.
            if self.layer ==4:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x_8, x_7, x_6, x_5 = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            x = x_8
        else:
            if self.layer ==4:
                x = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
        # vis = vis.permute(0, 3, 1, 2)
        # if not maps is None:
        #     x = torch.cat([x,maps],axis=-1)
        # if not vis is None:
        #     x = torch.cat([x,vis],axis=1)
        # Cell Classification head
        cls_preds = self.classification(x)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
        # Detection head
        loc_preds =self.regression(x)
        loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
        loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
        #loc_pred (N * T * W * H * loc)
        result = {'loc': loc_preds,
                  'cls': cls_preds}
        #MotionState head
        if self.motion_state:
            motion_cat = 3
            # NOTE(review): self.motion_cls is not created in this
            # class's __init__ — confirm it is set elsewhere before
            # enabling motion_state.
            motion_cls_preds = self.motion_cls(x)
            motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
            motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
            result['state'] = motion_cls_preds
        if self.kd_flag == 1:
            return result, x_8, x_7, x_6, x_5, feat_fuse_mat
        else:
            return result
class MeanFusion(nn.Module):
    """Collaborative detector fusing warped neighbour features by averaging.

    Identical pipeline to SumFusion except the ego feature and its
    warped neighbours are combined with an element-wise mean.
    """
    def __init__(self, config, layer=3, in_channels=13, kd_flag=True):
        super(MeanFusion, self).__init__()
        self.motion_state = config.motion_state
        if config.only_det:
            # Detection-only mode predicts a single output frame.
            self.out_seq_len = 1
        else:
            self.out_seq_len = config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.u_encoder = lidar_encoder(height_feat_size=in_channels)
        self.agent_num = 5  # maximum number of collaborating agents
        self.kd_flag = kd_flag  # truthy: forward also returns decoder intermediates
        self.layer = layer  # encoder level at which features are fused
        # Detection decoder
        self.decoder = lidar_decoder(height_feat_size=in_channels)
    def agents2batch(self, feats):
        """Flatten (batch, agent, C, H, W) into (agent * batch, C, H, W)."""
        agent_num = feats.shape[1]
        feat_list = []
        for i in range(agent_num):
            feat_list.append(feats[:, i, :, :, :])
        feat_mat = torch.cat(tuple(feat_list), 0)
        return feat_mat
    def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
        """Encode -> warp neighbours -> mean-fuse -> decode -> heads.

        Args:
            bevs: BEV input, permuted below to (Batch, seq, z, h, w).
            trans_matrices: trans_matrices[b, i, j] warps agent j into
                agent i's frame (4x4) — presumably; verify with caller.
            num_agent_tensor: active-agent counts; only [b, 0] is read.
            batch_size: samples per agent in the stacked batch.

        Returns:
            result dict with 'loc' and 'cls' (plus 'state' when
            motion_state); when kd_flag == 1 also the decoder
            intermediates and fused feature map for distillation.
        """
        bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
        x_0,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
        device = bevs.device
        # Pick the fusion level; `size` is the (N, C, H, W) shape used
        # to build the affine sampling grids.
        if self.layer ==4:
            feat_maps = x_4
            size = (1, 512, 16, 16)
        elif self.layer ==3:
            feat_maps = x_3
            size = (1, 256, 32, 32)
        elif self.layer == 2:
            feat_maps = x_2
            size = (1, 128, 64, 64)
        elif self.layer == 1:
            feat_maps = x_1
            size = (1, 64, 128, 128)
        elif self.layer == 0:
            feat_maps = x_0
            size = (1, 32, 256, 256)
        # print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
        # get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
        feat_map = {}
        feat_list = []
        for i in range(self.agent_num):
            feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
            feat_list.append(feat_map[i])
        local_com_mat = torch.cat(tuple(feat_list), 1)  # [2 5 512 16 16] [batch, agent, channel, height, width]
        local_com_mat_update = torch.cat(tuple(feat_list), 1)  # to avoid the inplace operation
        for b in range(batch_size):
            num_agent = num_agent_tensor[b, 0]
            for i in range(num_agent):
                tg_agent = local_com_mat[b, i]
                all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
                neighbor_feat_list = list()
                neighbor_feat_list.append(tg_agent)
                for j in range(num_agent):
                    if j != i:
                        nb_agent = torch.unsqueeze(local_com_mat[b, j], 0)  # [1 512 16 16]
                        nb_warp = all_warp[j]  # [4 4]
                        # normalize the translation vector
                        x_trans = (4*nb_warp[0, 3])/128
                        y_trans = -(4*nb_warp[1, 3])/128
                        theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
                        theta_rot = torch.unsqueeze(theta_rot, 0)
                        grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # build sampling grid for grid_sample
                        theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                        theta_trans = torch.unsqueeze(theta_trans, 0)
                        grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # build sampling grid for grid_sample
                        # first rotate the feature map, then translate it
                        warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                        warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                        warp_feat = torch.squeeze(warp_feat_trans)
                        neighbor_feat_list.append(warp_feat)
                # mean fusion over ego + warped neighbour features
                mean_feat = torch.mean(torch.stack(neighbor_feat_list), dim=0)  # [c, h, w]
                # feature update
                local_com_mat_update[b, i] = mean_feat
        # fused feature maps are passed to the decoder, replacing the
        # encoder activation at self.layer
        feat_fuse_mat = self.agents2batch(local_com_mat_update)
        if self.kd_flag == 1:
            # Distillation path: decoder also returns intermediates.
            if self.layer ==4:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x_8, x_7, x_6, x_5 = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            x = x_8
        else:
            if self.layer ==4:
                x = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
        # vis = vis.permute(0, 3, 1, 2)
        # if not maps is None:
        #     x = torch.cat([x,maps],axis=-1)
        # if not vis is None:
        #     x = torch.cat([x,vis],axis=1)
        # Cell Classification head
        cls_preds = self.classification(x)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
        # Detection head
        loc_preds =self.regression(x)
        loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
        loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
        #loc_pred (N * T * W * H * loc)
        result = {'loc': loc_preds,
                  'cls': cls_preds}
        #MotionState head
        if self.motion_state:
            motion_cat = 3
            # NOTE(review): self.motion_cls is not created in this
            # class's __init__ — confirm before enabling motion_state.
            motion_cls_preds = self.motion_cls(x)
            motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
            motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
            result['state'] = motion_cls_preds
        if self.kd_flag == 1:
            return result, x_8, x_7, x_6, x_5, feat_fuse_mat
        else:
            return result
class MaxFusion(nn.Module):
    """Collaborative detector fusing warped neighbour features by max-pooling.

    Identical pipeline to SumFusion/MeanFusion except the ego feature
    and its warped neighbours are combined with an element-wise maximum.
    """
    def __init__(self, config, layer=3, in_channels=13, kd_flag=True):
        super(MaxFusion, self).__init__()
        self.motion_state = config.motion_state
        if config.only_det:
            # Detection-only mode predicts a single output frame.
            self.out_seq_len = 1
        else:
            self.out_seq_len = config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.u_encoder = lidar_encoder(height_feat_size=in_channels)
        self.agent_num = 5  # maximum number of collaborating agents
        self.kd_flag = kd_flag  # truthy: forward also returns decoder intermediates
        self.layer = layer  # encoder level at which features are fused
        # Detection decoder
        self.decoder = lidar_decoder(height_feat_size=in_channels)
    def agents2batch(self, feats):
        """Flatten (batch, agent, C, H, W) into (agent * batch, C, H, W)."""
        agent_num = feats.shape[1]
        feat_list = []
        for i in range(agent_num):
            feat_list.append(feats[:, i, :, :, :])
        feat_mat = torch.cat(tuple(feat_list), 0)
        return feat_mat
    def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
        """Encode -> warp neighbours -> max-fuse -> decode -> heads.

        Args:
            bevs: BEV input, permuted below to (Batch, seq, z, h, w).
            trans_matrices: trans_matrices[b, i, j] warps agent j into
                agent i's frame (4x4) — presumably; verify with caller.
            num_agent_tensor: active-agent counts; only [b, 0] is read.
            batch_size: samples per agent in the stacked batch.

        Returns:
            result dict with 'loc' and 'cls' (plus 'state' when
            motion_state); when kd_flag == 1 also the decoder
            intermediates and fused feature map for distillation.
        """
        bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
        x_0,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
        device = bevs.device
        # Pick the fusion level; `size` is the (N, C, H, W) shape used
        # to build the affine sampling grids.
        if self.layer ==4:
            feat_maps = x_4
            size = (1, 512, 16, 16)
        elif self.layer ==3:
            feat_maps = x_3
            size = (1, 256, 32, 32)
        elif self.layer == 2:
            feat_maps = x_2
            size = (1, 128, 64, 64)
        elif self.layer == 1:
            feat_maps = x_1
            size = (1, 64, 128, 128)
        elif self.layer == 0:
            feat_maps = x_0
            size = (1, 32, 256, 256)
        # print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
        # get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
        feat_map = {}
        feat_list = []
        for i in range(self.agent_num):
            feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
            feat_list.append(feat_map[i])
        local_com_mat = torch.cat(tuple(feat_list), 1)  # [2 5 512 16 16] [batch, agent, channel, height, width]
        local_com_mat_update = torch.cat(tuple(feat_list), 1)  # to avoid the inplace operation
        for b in range(batch_size):
            num_agent = num_agent_tensor[b, 0]
            for i in range(num_agent):
                tg_agent = local_com_mat[b, i]
                all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
                neighbor_feat_list = list()
                neighbor_feat_list.append(tg_agent)
                for j in range(num_agent):
                    if j != i:
                        nb_agent = torch.unsqueeze(local_com_mat[b, j], 0)  # [1 512 16 16]
                        nb_warp = all_warp[j]  # [4 4]
                        # normalize the translation vector
                        x_trans = (4*nb_warp[0, 3])/128
                        y_trans = -(4*nb_warp[1, 3])/128
                        theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
                        theta_rot = torch.unsqueeze(theta_rot, 0)
                        grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # build sampling grid for grid_sample
                        theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                        theta_trans = torch.unsqueeze(theta_trans, 0)
                        grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # build sampling grid for grid_sample
                        # first rotate the feature map, then translate it
                        warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                        warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                        warp_feat = torch.squeeze(warp_feat_trans)
                        neighbor_feat_list.append(warp_feat)
                # max fusion: torch.max over a dim returns a
                # (values, indices) namedtuple; .values is used below
                max_feat = torch.max(torch.stack(neighbor_feat_list), dim=0)  # [c, h, w]
                # feature update
                local_com_mat_update[b, i] = max_feat.values
        # fused feature maps are passed to the decoder, replacing the
        # encoder activation at self.layer
        feat_fuse_mat = self.agents2batch(local_com_mat_update)
        if self.kd_flag == 1:
            # Distillation path: decoder also returns intermediates.
            if self.layer ==4:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x_8, x_7, x_6, x_5 = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            x = x_8
        else:
            if self.layer ==4:
                x = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
        # vis = vis.permute(0, 3, 1, 2)
        # if not maps is None:
        #     x = torch.cat([x,maps],axis=-1)
        # if not vis is None:
        #     x = torch.cat([x,vis],axis=1)
        # Cell Classification head
        cls_preds = self.classification(x)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
        # Detection head
        loc_preds =self.regression(x)
        loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
        loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
        #loc_pred (N * T * W * H * loc)
        result = {'loc': loc_preds,
                  'cls': cls_preds}
        #MotionState head
        if self.motion_state:
            motion_cat = 3
            # NOTE(review): self.motion_cls is not created in this
            # class's __init__ — confirm before enabling motion_state.
            motion_cls_preds = self.motion_cls(x)
            motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
            motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
            result['state'] = motion_cls_preds
        if self.kd_flag == 1:
            return result, x_8, x_7, x_6, x_5, feat_fuse_mat
        else:
            return result
class CatFusion(nn.Module):
    """Collaborative detector fusing features by concatenation + modulation.

    The mean of the warped neighbour features is concatenated
    channel-wise with the ego feature and reduced back through
    ModulationLayer3 before decoding.
    """
    def __init__(self, config, layer=3, in_channels=13, kd_flag=True):
        super(CatFusion, self).__init__()
        self.motion_state = config.motion_state
        if config.only_det:
            # Detection-only mode predicts a single output frame.
            self.out_seq_len = 1
        else:
            self.out_seq_len = config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.u_encoder = lidar_encoder(height_feat_size=in_channels)
        self.agent_num = 5  # maximum number of collaborating agents
        self.kd_flag = kd_flag  # truthy: forward also returns decoder intermediates
        self.layer = layer  # encoder level at which features are fused
        # Reduces the concatenated (ego, neighbour-mean) channels back
        # to the original width.
        self.ModulationLayer3 = ModulationLayer3(config)
        # Detection decoder
        self.decoder = lidar_decoder(height_feat_size=in_channels)
    def agents2batch(self, feats):
        """Flatten (batch, agent, C, H, W) into (agent * batch, C, H, W)."""
        agent_num = feats.shape[1]
        feat_list = []
        for i in range(agent_num):
            feat_list.append(feats[:, i, :, :, :])
        feat_mat = torch.cat(tuple(feat_list), 0)
        return feat_mat
    def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
        """Encode -> warp neighbours -> cat+modulate -> decode -> heads.

        Args:
            bevs: BEV input, permuted below to (Batch, seq, z, h, w).
            trans_matrices: trans_matrices[b, i, j] warps agent j into
                agent i's frame (4x4) — presumably; verify with caller.
            num_agent_tensor: active-agent counts; only [b, 0] is read.
            batch_size: samples per agent in the stacked batch.

        Returns:
            result dict with 'loc' and 'cls' (plus 'state' when
            motion_state); when kd_flag == 1 also the decoder
            intermediates and fused feature map for distillation.
        """
        bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
        x_0,x_1,x_2,x_3,x_4 = self.u_encoder(bevs)
        device = bevs.device
        # Pick the fusion level; `size` is the (N, C, H, W) shape used
        # to build the affine sampling grids.
        if self.layer ==4:
            feat_maps = x_4
            size = (1, 512, 16, 16)
        elif self.layer ==3:
            feat_maps = x_3
            size = (1, 256, 32, 32)
        elif self.layer == 2:
            feat_maps = x_2
            size = (1, 128, 64, 64)
        elif self.layer == 1:
            feat_maps = x_1
            size = (1, 64, 128, 128)
        elif self.layer == 0:
            feat_maps = x_0
            size = (1, 32, 256, 256)
        # print(feat_maps.shape, x_3.shape, x_2.shape, x_1.shape)
        # get feat maps for each agent [10 512 16 16] -> [2 5 512 16 16]
        feat_map = {}
        feat_list = []
        for i in range(self.agent_num):
            feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
            feat_list.append(feat_map[i])
        local_com_mat = torch.cat(tuple(feat_list), 1)  # [2 5 512 16 16] [batch, agent, channel, height, width]
        local_com_mat_update = torch.cat(tuple(feat_list), 1)  # to avoid the inplace operation
        for b in range(batch_size):
            num_agent = num_agent_tensor[b, 0]
            for i in range(num_agent):
                # Unlike the other fusion classes the ego feature is NOT
                # added to this list — only warped neighbours.
                # NOTE(review): num_agent == 1 leaves the list empty and
                # torch.stack below would raise — confirm callers always
                # run with at least two agents.
                neighbor_feat_list = list()
                tg_agent = local_com_mat[b, i]
                all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
                for j in range(num_agent):
                    if j != i:
                        nb_agent = torch.unsqueeze(local_com_mat[b, j], 0)  # [1 512 16 16]
                        nb_warp = all_warp[j]  # [4 4]
                        # normalize the translation vector
                        x_trans = (4*nb_warp[0, 3])/128
                        y_trans = -(4*nb_warp[1, 3])/128
                        theta_rot = torch.tensor([[nb_warp[0,0], nb_warp[0,1], 0.0], [nb_warp[1,0], nb_warp[1,1], 0.0]]).type(dtype=torch.float).to(device)
                        theta_rot = torch.unsqueeze(theta_rot, 0)
                        grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # build sampling grid for grid_sample
                        theta_trans = torch.tensor([[1.0, 0.0, x_trans], [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                        theta_trans = torch.unsqueeze(theta_trans, 0)
                        grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # build sampling grid for grid_sample
                        # first rotate the feature map, then translate it
                        warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                        warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                        warp_feat = torch.squeeze(warp_feat_trans)
                        neighbor_feat_list.append(warp_feat)
                # sum fusion
                # tg_agent = tg_agent + warp_feat.type(dtype=torch.float32)
                # cat fusion: mean of neighbours, concatenated with ego
                mean_feat = torch.mean(torch.stack(neighbor_feat_list), dim=0)  # [c, h, w]
                cat_feat = torch.cat([tg_agent, mean_feat], dim=0)
                cat_feat = cat_feat.unsqueeze(0)  # [1, 2c, h, w]
                modulation_feat = self.ModulationLayer3(cat_feat)
                # feature update
                local_com_mat_update[b, i] = modulation_feat
        # fused feature maps are passed to the decoder, replacing the
        # encoder activation at self.layer
        feat_fuse_mat = self.agents2batch(local_com_mat_update)
        if self.kd_flag == 1:
            # Distillation path: decoder also returns intermediates.
            if self.layer ==4:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x_8, x_7, x_6, x_5 = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x_8, x_7, x_6, x_5 = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            x = x_8
        else:
            if self.layer ==4:
                x = self.decoder(x_0,x_1,x_2,x_3,feat_fuse_mat,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 3:
                x = self.decoder(x_0,x_1,x_2,feat_fuse_mat,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 2:
                x = self.decoder(x_0,x_1,feat_fuse_mat,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 1:
                x = self.decoder(x_0,feat_fuse_mat,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
            elif self.layer == 0:
                x = self.decoder(feat_fuse_mat,x_1,x_2,x_3,x_4,batch_size, kd_flag = self.kd_flag)
        # vis = vis.permute(0, 3, 1, 2)
        # if not maps is None:
        #     x = torch.cat([x,maps],axis=-1)
        # if not vis is None:
        #     x = torch.cat([x,vis],axis=1)
        # Cell Classification head
        cls_preds = self.classification(x)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.view(cls_preds.shape[0],-1,self.category_num)
        # Detection head
        loc_preds =self.regression(x)
        loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
        loc_preds = loc_preds.view(-1,loc_preds.size(1),loc_preds.size(2),self.anchor_num_per_loc,self.out_seq_len,self.box_code_size)
        #loc_pred (N * T * W * H * loc)
        result = {'loc': loc_preds,
                  'cls': cls_preds}
        #MotionState head
        if self.motion_state:
            motion_cat = 3
            # NOTE(review): self.motion_cls is not created in this
            # class's __init__ — confirm before enabling motion_state.
            motion_cls_preds = self.motion_cls(x)
            motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
            motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0],-1,motion_cat)
            result['state'] = motion_cls_preds
        if self.kd_flag == 1:
            return result, x_8, x_7, x_6, x_5, feat_fuse_mat
        else:
            return result
class AgentwiseWeightedFusion(nn.Module):
def __init__(self, config, layer=3, in_channels=13, kd_flag=True):
    """Detector that fuses neighbour features with learned per-agent weights.

    Args:
        config: project config providing head, anchor and output settings.
        layer: encoder level at which agent features are fused.
        in_channels: height (z) channel count of the BEV input.
        kd_flag: when truthy, forward also returns decoder intermediates
            for knowledge distillation.
    """
    super(AgentwiseWeightedFusion, self).__init__()
    self.motion_state = config.motion_state
    if config.only_det:
        # Detection-only mode predicts a single output frame.
        self.out_seq_len = 1
    else:
        self.out_seq_len = config.pred_len
    self.box_code_size = config.box_code_size
    self.category_num = config.category_num
    self.use_map = config.use_map
    self.anchor_num_per_loc = len(config.anchor_size)
    self.classification = ClassificationHead(config)
    self.regression = SingleRegressionHead(config)
    self.u_encoder = lidar_encoder(height_feat_size=in_channels)
    self.agent_num = 5  # maximum number of collaborating agents
    self.kd_flag = kd_flag
    self.layer = layer
    self.ModulationLayer3 = ModulationLayer3(config)
    # Produces one scalar weight per (ego, neighbour) feature pair.
    self.AgentWeightedFusion = AgentWeightedFusion(config)
    # Detection decoder
    self.decoder = lidar_decoder(height_feat_size=in_channels)
def agents2batch(self, feats):
    """Flatten the agent axis into the batch axis.

    (batch, agent, C, H, W) -> (agent * batch, C, H, W), grouped by
    agent: all samples of agent 0 first, then agent 1, and so on.
    """
    slices = tuple(feats[:, idx] for idx in range(feats.shape[1]))
    return torch.cat(slices, 0)
def forward(self, bevs, trans_matrices, num_agent_tensor, batch_size=1):
    """Multi-agent fused detection forward pass.

    Args:
        bevs: BEV voxel input, permuted below to (batch*agent, seq, z, h, w).
        trans_matrices: relative-pose transforms, indexed [b, i, j] -> 4x4
            (per the inline comments; shapes not verifiable from this file).
        num_agent_tensor: per-sample active-agent counts; num_agent_tensor[b, 0]
            is used as the count for sample b.
        batch_size: number of samples per agent-slice along the stacked batch dim.

    Returns:
        result dict with 'loc' and 'cls' predictions ('state' added when
        self.motion_state); when self.kd_flag == 1, additionally returns the
        intermediate decoder features and fused feature map for distillation.
    """
    bevs = bevs.permute(0, 1, 4, 2, 3)  # (Batch, seq, z, h, w)
    x_0, x_1, x_2, x_3, x_4 = self.u_encoder(bevs)
    device = bevs.device

    # Select the encoder level at which agents exchange features, and the
    # matching (N, C, H, W) grid size used for warping.
    if self.layer == 4:
        feat_maps = x_4
        size = (1, 512, 16, 16)
    elif self.layer == 3:
        feat_maps = x_3
        size = (1, 256, 32, 32)
    elif self.layer == 2:
        feat_maps = x_2
        size = (1, 128, 64, 64)
    elif self.layer == 1:
        feat_maps = x_1
        size = (1, 64, 128, 128)
    elif self.layer == 0:
        feat_maps = x_0
        size = (1, 32, 256, 256)

    # Regroup per-agent feature maps: [10 512 16 16] -> [2 5 512 16 16]
    feat_map = {}
    feat_list = []
    for i in range(self.agent_num):
        feat_map[i] = torch.unsqueeze(feat_maps[batch_size * i:batch_size * (i + 1)], 1)
        feat_list.append(feat_map[i])
    local_com_mat = torch.cat(tuple(feat_list), 1)  # [batch, agent, channel, height, width]
    local_com_mat_update = torch.cat(tuple(feat_list), 1)  # to avoid the inplace operation

    for b in range(batch_size):
        num_agent = num_agent_tensor[b, 0]
        for i in range(num_agent):
            tg_agent = local_com_mat[b, i]
            all_warp = trans_matrices[b, i]  # transformation [2 5 5 4 4]
            neighbor_feat_list = list()
            neighbor_feat_list.append(tg_agent)
            for j in range(num_agent):
                if j != i:
                    nb_agent = torch.unsqueeze(local_com_mat[b, j], 0)  # [1 512 16 16]
                    nb_warp = all_warp[j]  # [4 4]
                    # Normalize the translation vector into the [-1, 1]
                    # affine_grid coordinate range.
                    x_trans = (4 * nb_warp[0, 3]) / 128
                    y_trans = -(4 * nb_warp[1, 3]) / 128

                    theta_rot = torch.tensor([[nb_warp[0, 0], nb_warp[0, 1], 0.0],
                                              [nb_warp[1, 0], nb_warp[1, 1], 0.0]]).type(dtype=torch.float).to(device)
                    theta_rot = torch.unsqueeze(theta_rot, 0)
                    grid_rot = F.affine_grid(theta_rot, size=torch.Size(size))  # grid for grid_sample

                    theta_trans = torch.tensor([[1.0, 0.0, x_trans],
                                                [0.0, 1.0, y_trans]]).type(dtype=torch.float).to(device)
                    theta_trans = torch.unsqueeze(theta_trans, 0)
                    grid_trans = F.affine_grid(theta_trans, size=torch.Size(size))  # grid for grid_sample

                    # First rotate the feature map, then translate it.
                    warp_feat_rot = F.grid_sample(nb_agent, grid_rot, mode='bilinear')
                    warp_feat_trans = F.grid_sample(warp_feat_rot, grid_trans, mode='bilinear')
                    warp_feat = torch.squeeze(warp_feat_trans)
                    neighbor_feat_list.append(warp_feat)

            # Agent-wise weighted fusion: score each (target, neighbor) pair.
            agent_weight_list = list()
            for k in range(num_agent):
                cat_feat = torch.cat([tg_agent, neighbor_feat_list[k]], dim=0)
                cat_feat = cat_feat.unsqueeze(0)
                AgentWeight = self.AgentWeightedFusion(cat_feat)
                agent_weight_list.append(AgentWeight)
            # BUG FIX: the original built the softmax input with
            # torch.tensor(agent_weight_list), which copies the values and
            # DETACHES them from the autograd graph, so AgentWeightedFusion
            # never received gradients. torch.cat keeps them differentiable
            # and yields the same softmax values.
            agent_weights = torch.cat([w.view(1) for w in agent_weight_list])
            soft_agent_weight_list = F.softmax(agent_weights, dim=0)

            agent_wise_weight_feat = 0
            for k in range(num_agent):
                agent_wise_weight_feat = agent_wise_weight_feat + soft_agent_weight_list[k] * neighbor_feat_list[k]

            # Feature update (written into the clone, not local_com_mat).
            local_com_mat_update[b, i] = agent_wise_weight_feat

    # Weighted feature maps are passed to the decoder.
    feat_fuse_mat = self.agents2batch(local_com_mat_update)

    if self.kd_flag == 1:
        # Distillation mode: the decoder returns its intermediate features.
        if self.layer == 4:
            x_8, x_7, x_6, x_5 = self.decoder(x_0, x_1, x_2, x_3, feat_fuse_mat, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 3:
            x_8, x_7, x_6, x_5 = self.decoder(x_0, x_1, x_2, feat_fuse_mat, x_4, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 2:
            x_8, x_7, x_6, x_5 = self.decoder(x_0, x_1, feat_fuse_mat, x_3, x_4, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 1:
            x_8, x_7, x_6, x_5 = self.decoder(x_0, feat_fuse_mat, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 0:
            x_8, x_7, x_6, x_5 = self.decoder(feat_fuse_mat, x_1, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)
        x = x_8
    else:
        if self.layer == 4:
            x = self.decoder(x_0, x_1, x_2, x_3, feat_fuse_mat, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 3:
            x = self.decoder(x_0, x_1, x_2, feat_fuse_mat, x_4, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 2:
            x = self.decoder(x_0, x_1, feat_fuse_mat, x_3, x_4, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 1:
            x = self.decoder(x_0, feat_fuse_mat, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)
        elif self.layer == 0:
            x = self.decoder(feat_fuse_mat, x_1, x_2, x_3, x_4, batch_size, kd_flag=self.kd_flag)

    # Cell classification head.
    cls_preds = self.classification(x)
    cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
    cls_preds = cls_preds.view(cls_preds.shape[0], -1, self.category_num)

    # Detection (box regression) head.
    loc_preds = self.regression(x)
    loc_preds = loc_preds.permute(0, 2, 3, 1).contiguous()
    loc_preds = loc_preds.view(-1, loc_preds.size(1), loc_preds.size(2),
                               self.anchor_num_per_loc, self.out_seq_len, self.box_code_size)
    # loc_pred (N * T * W * H * loc)
    result = {'loc': loc_preds,
              'cls': cls_preds}

    # Motion-state head.
    if self.motion_state:
        motion_cat = 3
        motion_cls_preds = self.motion_cls(x)
        motion_cls_preds = motion_cls_preds.permute(0, 2, 3, 1).contiguous()
        motion_cls_preds = motion_cls_preds.view(cls_preds.shape[0], -1, motion_cat)
        result['state'] = motion_cls_preds

    if self.kd_flag == 1:
        return result, x_8, x_7, x_6, x_5, feat_fuse_mat
    else:
        return result
class ModulationLayer3(nn.Module):
    """1x1 conv -> BatchNorm -> ReLU projection from 512 to 256 channels.

    The `config` argument is accepted for interface parity but not used.
    """

    def __init__(self, config):
        super(ModulationLayer3, self).__init__()
        self.conv1_1 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
        self.bn1_1 = nn.BatchNorm2d(256)

    def forward(self, x):
        # Collapse any leading dims into a single batch dim: (..., C, H, W) -> (N, C, H, W).
        flat = x.view(-1, x.size(-3), x.size(-2), x.size(-1))
        return F.relu(self.bn1_1(self.conv1_1(flat)))
class PixelWeightedFusionSoftmax(nn.Module):
    """Per-pixel scalar weight head over a concatenated feature pair.

    Squeezes a 2*channel input down to a single non-negative weight map
    through a stack of 1x1 convolutions (2*channel -> 128 -> 32 -> 8 -> 1).
    """

    def __init__(self, channel):
        super(PixelWeightedFusionSoftmax, self).__init__()
        self.conv1_1 = nn.Conv2d(channel * 2, 128, kernel_size=1, stride=1, padding=0)
        self.bn1_1 = nn.BatchNorm2d(128)
        self.conv1_2 = nn.Conv2d(128, 32, kernel_size=1, stride=1, padding=0)
        self.bn1_2 = nn.BatchNorm2d(32)
        self.conv1_3 = nn.Conv2d(32, 8, kernel_size=1, stride=1, padding=0)
        self.bn1_3 = nn.BatchNorm2d(8)
        self.conv1_4 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Flatten leading dims into the batch dim before the conv stack.
        out = x.view(-1, x.size(-3), x.size(-2), x.size(-1))
        out = F.relu(self.bn1_1(self.conv1_1(out)))
        out = F.relu(self.bn1_2(self.conv1_2(out)))
        out = F.relu(self.bn1_3(self.conv1_3(out)))
        # Final layer deliberately has no BatchNorm.
        return F.relu(self.conv1_4(out))
class AgentWeightedFusion(nn.Module):
    """Scalar fusion weight for one (target, neighbor) feature pair.

    A stack of 1x1 convolutions (512 -> 128 -> 32 -> 8 -> 1) followed by a
    32x32 convolution that reduces the spatial map to a single value, so a
    (N, 512, 32, 32) input yields a (N, 1, 1, 1) non-negative weight.
    The `config` argument is accepted for interface parity but not used.
    """

    def __init__(self, config):
        super(AgentWeightedFusion, self).__init__()
        self.conv1_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0)
        self.bn1_1 = nn.BatchNorm2d(128)
        self.conv1_2 = nn.Conv2d(128, 32, kernel_size=1, stride=1, padding=0)
        self.bn1_2 = nn.BatchNorm2d(32)
        self.conv1_3 = nn.Conv2d(32, 8, kernel_size=1, stride=1, padding=0)
        self.bn1_3 = nn.BatchNorm2d(8)
        self.conv1_4 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0)
        # Spatial reduction: a full 32x32 kernel collapses H and W to 1.
        self.conv1_5 = nn.Conv2d(1, 1, kernel_size=32, stride=1, padding=0)

    def forward(self, x):
        # Flatten leading dims into the batch dim before scoring.
        score = x.view(-1, x.size(-3), x.size(-2), x.size(-1))
        score = F.relu(self.bn1_1(self.conv1_1(score)))
        score = F.relu(self.bn1_2(self.conv1_2(score)))
        score = F.relu(self.bn1_3(self.conv1_3(score)))
        score = F.relu(self.conv1_4(score))
        return F.relu(self.conv1_5(score))
class ClassificationHead(nn.Module):
    """Cell classification head: 3x3 conv + BN + ReLU, then a 1x1 conv
    producing category_num * anchor_num_per_loc logit channels."""

    def __init__(self, config):
        super(ClassificationHead, self).__init__()
        category_num = config.category_num
        # Base feature width, widened when map/visibility channels are appended.
        channel = 32
        if config.use_map:
            channel += 6
        if config.use_vis:
            channel += 13
        anchor_num_per_loc = len(config.anchor_size)
        self.conv1 = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(channel, category_num * anchor_num_per_loc,
                               kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(channel)

    def forward(self, x):
        hidden = F.relu(self.bn1(self.conv1(x)))
        return self.conv2(hidden)
class SingleRegressionHead(nn.Module):
    """Box regression head.

    Produces anchor_num_per_loc * box_code_size * out_seq_len channels per
    spatial location. Only the config.binary == True configurations are
    implemented (detection-only uses a single conv block; otherwise a deeper
    128-wide stack).

    Raises:
        NotImplementedError: if config.binary is falsy (see bug-fix note below).
    """

    def __init__(self, config):
        super(SingleRegressionHead, self).__init__()
        category_num = config.category_num  # kept for parity with the original; unused here
        channel = 32
        if config.use_map:
            channel += 6
        if config.use_vis:
            channel += 13
        anchor_num_per_loc = len(config.anchor_size)
        box_code_size = config.box_code_size
        out_seq_len = 1 if config.only_det else config.pred_len

        if config.binary:
            if config.only_det:
                self.box_prediction = nn.Sequential(
                    nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1),
                    nn.BatchNorm2d(channel),
                    nn.ReLU(),
                    nn.Conv2d(channel, anchor_num_per_loc * box_code_size * out_seq_len,
                              kernel_size=1, stride=1, padding=0))
            else:
                self.box_prediction = nn.Sequential(
                    nn.Conv2d(channel, 128, kernel_size=3, stride=1, padding=1),
                    nn.BatchNorm2d(128),
                    nn.ReLU(),
                    nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
                    nn.BatchNorm2d(128),
                    nn.ReLU(),
                    nn.Conv2d(128, anchor_num_per_loc * box_code_size * out_seq_len,
                              kernel_size=1, stride=1, padding=0))
        else:
            # BUG FIX: the original defined self.box_prediction only when
            # config.binary was true; with binary=False, forward() later died
            # with an AttributeError far from the root cause. Fail fast here
            # with an explicit error instead.
            raise NotImplementedError(
                'SingleRegressionHead has no branch for config.binary=False')

    def forward(self, x):
        box = self.box_prediction(x)
        return box
class TeacherNet(nn.Module):
    """Teacher network for knowledge distillation.

    Wraps an STPN_KD backbone; forward() exposes the backbone's intermediate
    feature maps directly. The classification/regression heads are built for
    checkpoint compatibility but are not used in forward().
    """

    def __init__(self, config):
        super(TeacherNet, self).__init__()
        self.motion_state = config.motion_state
        self.out_seq_len = 1 if config.only_det else config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.stpn = STPN_KD(height_feat_size=config.map_dims[2])

    def forward(self, bevs, maps=None, vis=None):
        # (Batch, seq, h, w, z) -> (Batch, seq, z, h, w)
        bevs = bevs.permute(0, 1, 4, 2, 3)
        # Returns the backbone tuple (x_8, x_7, x_6, x_5, x_3, x_2) unchanged.
        return self.stpn(bevs)
class FaFNet(nn.Module):
    """Single-agent detector: STPN_KD backbone plus classification and box
    regression heads."""

    def __init__(self, config):
        super(FaFNet, self).__init__()
        self.motion_state = config.motion_state
        self.out_seq_len = 1 if config.only_det else config.pred_len
        self.box_code_size = config.box_code_size
        self.category_num = config.category_num
        self.use_map = config.use_map
        self.anchor_num_per_loc = len(config.anchor_size)
        self.classification = ClassificationHead(config)
        self.regression = SingleRegressionHead(config)
        self.stpn = STPN_KD(height_feat_size=config.map_dims[2])

    def forward(self, bevs, maps=None, vis=None, batch_size=None):
        # (Batch, seq, h, w, z) -> (Batch, seq, z, h, w)
        bevs = bevs.permute(0, 1, 4, 2, 3)
        x_8, x_7, x_6, x_5, x_3, x_2 = self.stpn(bevs)
        x = x_8

        # Cell classification head.
        cls_preds = self.classification(x).permute(0, 2, 3, 1).contiguous()
        cls_preds = cls_preds.view(cls_preds.shape[0], -1, self.category_num)

        # Detection (box regression) head.
        loc_preds = self.regression(x).permute(0, 2, 3, 1).contiguous()
        loc_preds = loc_preds.view(-1, loc_preds.size(1), loc_preds.size(2),
                                   self.anchor_num_per_loc, self.out_seq_len,
                                   self.box_code_size)
        # loc_pred (N * T * W * H * loc)
        result = {'loc': loc_preds,
                  'cls': cls_preds}

        # Motion-state head.
        # NOTE(review): self.motion_cls is never assigned in __init__, so
        # motion_state=True would raise AttributeError here — confirm whether
        # a MotionStateHead was meant to be constructed.
        if self.motion_state:
            motion_cat = 3
            motion_cls_preds = self.motion_cls(x).permute(0, 2, 3, 1).contiguous()
            result['state'] = motion_cls_preds.view(cls_preds.shape[0], -1, motion_cat)

        return result
class policy_net4(nn.Module):
    """Feature extractor for the communication policy.

    Runs the lidar encoder and refines its deepest output through a small
    conv pyramid that downsamples twice (stride-2 at conv3 and conv5).
    """

    def __init__(self, in_channels=13, input_feat_sz=32):
        super(policy_net4, self).__init__()
        feat_map_sz = input_feat_sz // 4
        self.n_feat = int(256 * feat_map_sz * feat_map_sz)
        self.lidar_encoder = lidar_encoder(height_feat_size=in_channels)
        # Refinement / downsampling stages.
        self.conv1 = conv2DBatchNormRelu(512, 512, k_size=3, stride=1, padding=1)
        self.conv2 = conv2DBatchNormRelu(512, 256, k_size=3, stride=1, padding=1)
        self.conv3 = conv2DBatchNormRelu(256, 256, k_size=3, stride=2, padding=1)
        self.conv4 = conv2DBatchNormRelu(256, 256, k_size=3, stride=1, padding=1)
        self.conv5 = conv2DBatchNormRelu(256, 256, k_size=3, stride=2, padding=1)

    def forward(self, features_map):
        # Only the deepest (5th) encoder output is used.
        _, _, _, _, out = self.lidar_encoder(features_map)
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            out = stage(out)
        return out
class MIMOGeneralDotProductAttention(nn.Module):
    ''' Scaled Dot-Product Attention '''

    def __init__(self, query_size, key_size, warp_flag, attn_dropout=0.1):
        super().__init__()
        # NOTE: sparsemax is constructed but the forward pass below only uses
        # softmax; the `sparse` flag is currently ignored.
        self.sparsemax = Sparsemax(dim=1)
        self.softmax = nn.Softmax(dim=1)
        self.linear = nn.Linear(query_size, key_size)
        self.warp_flag = warp_flag
        print('Msg size: ', query_size, ' Key size: ', key_size)

    def forward(self, qu, k, v, sparse=True):
        # qu: (batch, agents, query_size); k: (batch, agents, key_size);
        # v: (batch, agents, channel, size, size) — or per-query values
        # already expanded when warp_flag == 1 (per the original comments).
        query = self.linear(qu)  # (batch, agents, key_size)
        # Score every key against every query: column j holds the scores of
        # all keys for query j.
        scores = torch.bmm(k, query.transpose(2, 1))
        attn = self.softmax(scores)  # (batch, agents, agents)
        bats, key_num, query_num = attn.shape
        attn_exp = attn.view(bats, key_num, query_num, 1, 1, 1)
        if self.warp_flag == 1:
            v_exp = v
        else:
            v_exp = torch.unsqueeze(v, 2).expand(-1, -1, query_num, -1, -1, -1)
        # Weighted sum of value maps over the key dimension.
        fused = (attn_exp * v_exp).sum(1)
        return fused, attn
class km_generator(nn.Module):
    """MLP producing a key/message vector from a flattened feature map.

    Expects inputs reshapeable to (N, 256 * s * s) with s = input_feat_sz // 4,
    and maps them through 256 -> 128 -> out_size fully-connected layers.
    """

    def __init__(self, out_size=128, input_feat_sz=32):
        super(km_generator, self).__init__()
        side = input_feat_sz // 4
        self.n_feat = int(256 * side * side)
        self.fc = nn.Sequential(
            nn.Linear(self.n_feat, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, out_size))

    def forward(self, features_map):
        # Flatten everything but the implied batch dimension.
        return self.fc(features_map.view(-1, self.n_feat))
| 43.800979
| 163
| 0.576511
| 11,791
| 80,550
| 3.664235
| 0.040879
| 0.020831
| 0.026178
| 0.020831
| 0.833654
| 0.816827
| 0.808541
| 0.793728
| 0.780118
| 0.768985
| 0
| 0.048085
| 0.310143
| 80,550
| 1,839
| 164
| 43.800979
| 0.729431
| 0.124122
| 0
| 0.8
| 0
| 0
| 0.006181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035249
| false
| 0
| 0.006897
| 0
| 0.081992
| 0.002299
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
125438425fafd5a22575941078697bd9504dd3f5
| 1,269
|
py
|
Python
|
tlidb/TLiDB/metrics/loss.py
|
alon-albalak/TLiDB
|
4f3524a3bbe7580e417dd884c4dc8751bdaf8855
|
[
"MIT"
] | null | null | null |
tlidb/TLiDB/metrics/loss.py
|
alon-albalak/TLiDB
|
4f3524a3bbe7580e417dd884c4dc8751bdaf8855
|
[
"MIT"
] | null | null | null |
tlidb/TLiDB/metrics/loss.py
|
alon-albalak/TLiDB
|
4f3524a3bbe7580e417dd884c4dc8751bdaf8855
|
[
"MIT"
] | null | null | null |
from tlidb.TLiDB.metrics.metrics import Metric, ElementwiseMetric
class Loss(Metric):
    """Metric that reports the value of a loss function over a batch."""

    def __init__(self, loss_fn, name=None):
        # loss_fn: callable taking (y_pred, y_true) and returning the loss.
        self.loss_fn = loss_fn
        if name is None:
            name = 'loss'
        super().__init__(name=name)

    def _compute(self, y_pred, y_true):
        """
        Compute the loss over a whole batch.
        (Docstring corrected: this is the batch-level metric, not the
        element-wise variant — see ElementwiseLoss for per-element values.)
        Args:
            - y_pred (Tensor): Predicted targets or model output
            - y_true (Tensor): True targets
        Output:
            - loss: whatever self.loss_fn returns for the batch
        """
        return self.loss_fn(y_pred, y_true)
class ElementwiseLoss(ElementwiseMetric):
    """Element-wise metric wrapping an unreduced loss function."""

    def __init__(self, loss_fn, name=None):
        # loss_fn should return one loss value per batch element.
        self.loss_fn = loss_fn
        super().__init__(name='loss' if name is None else name)

    def _compute_element_wise(self, y_pred, y_true):
        """
        Helper for computing element-wise metric, implemented for each metric
        Args:
            - y_pred (Tensor): Predicted targets or model output
            - y_true (Tensor): True targets
        Output:
            - element_wise_metrics (Tensor): tensor of size (batch_size, )
        """
        return self.loss_fn(y_pred, y_true)
| 33.394737
| 77
| 0.609929
| 158
| 1,269
| 4.607595
| 0.253165
| 0.065934
| 0.082418
| 0.054945
| 0.835165
| 0.835165
| 0.835165
| 0.835165
| 0.835165
| 0.835165
| 0
| 0
| 0.300236
| 1,269
| 37
| 78
| 34.297297
| 0.81982
| 0.383767
| 0
| 0.705882
| 0
| 0
| 0.012214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| false
| 0
| 0.058824
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
89c8f98b9a320c5cc2af9f4feb8b1176518293c3
| 373
|
py
|
Python
|
tests/parser/aggregates.min.propagation.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.min.propagation.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.min.propagation.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser round-trip test for #min aggregate propagation: `output` is expected
# to equal `input`, i.e. parsing and re-emitting the ASP program must not
# rewrite it.  (`input` shadows the builtin name; kept as-is because the test
# harness presumably looks up these exact variable names — TODO confirm.)
input = """
distanza(1,0).
distanza(2,1).
d(1) | nd(1).
d(2) | nd(2).
:- not #count{D: d(D)}=1.
serve(1,Dist) :- distanza(_,Dist),
Dist = #min {Y : d(D1), distanza(D1,Y) }.
"""
output = """
distanza(1,0).
distanza(2,1).
d(1) | nd(1).
d(2) | nd(2).
:- not #count{D: d(D)}=1.
serve(1,Dist) :- distanza(_,Dist),
Dist = #min {Y : d(D1), distanza(D1,Y) }.
"""
| 12.032258
| 43
| 0.495979
| 68
| 373
| 2.691176
| 0.220588
| 0.043716
| 0.10929
| 0.196721
| 0.939891
| 0.939891
| 0.939891
| 0.939891
| 0.939891
| 0.939891
| 0
| 0.079734
| 0.193029
| 373
| 30
| 44
| 12.433333
| 0.528239
| 0
| 0
| 0.888889
| 0
| 0
| 0.91689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
89cf4cd8f5a0ca0803bb7b93f36a41c5f80e732d
| 39
|
py
|
Python
|
miniconda3-lnx/pkgs/wheel-0.34.2-py37_0/info/test/run_test.py
|
Thibaut-Kovaltchouk/MultiPyzo
|
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
|
[
"CC0-1.0"
] | 1
|
2021-11-08T01:25:40.000Z
|
2021-11-08T01:25:40.000Z
|
miniconda3-lnx/pkgs/wheel-0.34.2-py37_0/info/test/run_test.py
|
Thibaut-Kovaltchouk/MultiPyzo
|
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
|
[
"CC0-1.0"
] | 19
|
2021-03-10T21:30:56.000Z
|
2022-02-27T06:45:03.000Z
|
miniconda3-lnx/pkgs/wheel-0.34.2-py37_0/info/test/run_test.py
|
Thibaut-Kovaltchouk/MultiPyzo
|
a15ecf77e31ebeb195e70385f5ac132f6ab4504d
|
[
"CC0-1.0"
] | 2
|
2021-11-08T01:25:30.000Z
|
2022-01-13T07:53:38.000Z
|
# Conda package smoke test: succeeds iff the `wheel` package is importable.
print("import: 'wheel'")
import wheel
| 9.75
| 24
| 0.692308
| 5
| 39
| 5.4
| 0.6
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 3
| 25
| 13
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
d60b40a874b9a32407bb3d5d281e5ea04cc05082
| 23,642
|
py
|
Python
|
sdk/fedn/proto/alliance_pb2_grpc.py
|
joshyka/fedn
|
398f9bb9f913f640254294b97b118292af6996ce
|
[
"Apache-2.0"
] | null | null | null |
sdk/fedn/proto/alliance_pb2_grpc.py
|
joshyka/fedn
|
398f9bb9f913f640254294b97b118292af6996ce
|
[
"Apache-2.0"
] | null | null | null |
sdk/fedn/proto/alliance_pb2_grpc.py
|
joshyka/fedn
|
398f9bb9f913f640254294b97b118292af6996ce
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from fedn.proto import alliance_pb2 as fedn_dot_proto_dot_alliance__pb2
class ReducerStub(object):
    """Missing associated documentation comment in .proto file."""

    # NOTE(review): this file is emitted by the gRPC protoc plugin
    # ("DO NOT EDIT" header) — regenerate from the .proto instead of
    # changing logic by hand.
    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary callable for /grpc.Reducer/GetGlobalModel.
        self.GetGlobalModel = channel.unary_unary(
                '/grpc.Reducer/GetGlobalModel',
                request_serializer=fedn_dot_proto_dot_alliance__pb2.GetGlobalModelRequest.SerializeToString,
                response_deserializer=fedn_dot_proto_dot_alliance__pb2.GetGlobalModelResponse.FromString,
                )
class ReducerServicer(object):
    """Missing associated documentation comment in .proto file."""

    def GetGlobalModel(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Abstract handler: subclasses override this; the generated default
        # reports UNIMPLEMENTED to the client and raises locally.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ReducerServicer_to_server(servicer, server):
    """Register a ReducerServicer's RPC handlers on a grpc.Server."""
    rpc_method_handlers = {
            'GetGlobalModel': grpc.unary_unary_rpc_method_handler(
                    servicer.GetGlobalModel,
                    request_deserializer=fedn_dot_proto_dot_alliance__pb2.GetGlobalModelRequest.FromString,
                    response_serializer=fedn_dot_proto_dot_alliance__pb2.GetGlobalModelResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'grpc.Reducer', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Reducer(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def GetGlobalModel(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # One-shot convenience call on gRPC's experimental API: opens a channel
        # to `target`, invokes /grpc.Reducer/GetGlobalModel, returns the response.
        return grpc.experimental.unary_unary(request, target, '/grpc.Reducer/GetGlobalModel',
            fedn_dot_proto_dot_alliance__pb2.GetGlobalModelRequest.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.GetGlobalModelResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)
class ConnectorStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AllianceStatusStream = channel.unary_stream(
'/grpc.Connector/AllianceStatusStream',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.Status.FromString,
)
self.SendStatus = channel.unary_unary(
'/grpc.Connector/SendStatus',
request_serializer=fedn_dot_proto_dot_alliance__pb2.Status.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.Response.FromString,
)
self.ListActiveClients = channel.unary_unary(
'/grpc.Connector/ListActiveClients',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ListClientsRequest.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.ClientList.FromString,
)
self.SendHeartbeat = channel.unary_unary(
'/grpc.Connector/SendHeartbeat',
request_serializer=fedn_dot_proto_dot_alliance__pb2.Heartbeat.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.Response.FromString,
)
class ConnectorServicer(object):
"""Missing associated documentation comment in .proto file."""
def AllianceStatusStream(self, request, context):
"""Stream endpoint for status updates
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendStatus(self, request, context):
"""Report endpoint
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListActiveClients(self, request, context):
"""rpc RegisterClient (ClientAvailableMessage) returns (Response);
List active clients endpoint
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendHeartbeat(self, request, context):
"""Client messaging to stay engaged.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ConnectorServicer_to_server(servicer, server):
rpc_method_handlers = {
'AllianceStatusStream': grpc.unary_stream_rpc_method_handler(
servicer.AllianceStatusStream,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.Status.SerializeToString,
),
'SendStatus': grpc.unary_unary_rpc_method_handler(
servicer.SendStatus,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.Status.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.Response.SerializeToString,
),
'ListActiveClients': grpc.unary_unary_rpc_method_handler(
servicer.ListActiveClients,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ListClientsRequest.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.ClientList.SerializeToString,
),
'SendHeartbeat': grpc.unary_unary_rpc_method_handler(
servicer.SendHeartbeat,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.Heartbeat.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'grpc.Connector', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Connector(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def AllianceStatusStream(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/grpc.Connector/AllianceStatusStream',
fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
fedn_dot_proto_dot_alliance__pb2.Status.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SendStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/grpc.Connector/SendStatus',
fedn_dot_proto_dot_alliance__pb2.Status.SerializeToString,
fedn_dot_proto_dot_alliance__pb2.Response.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListActiveClients(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/grpc.Connector/ListActiveClients',
fedn_dot_proto_dot_alliance__pb2.ListClientsRequest.SerializeToString,
fedn_dot_proto_dot_alliance__pb2.ClientList.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SendHeartbeat(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/grpc.Connector/SendHeartbeat',
fedn_dot_proto_dot_alliance__pb2.Heartbeat.SerializeToString,
fedn_dot_proto_dot_alliance__pb2.Response.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
class CombinerStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ModelUpdateRequestStream = channel.unary_stream(
'/grpc.Combiner/ModelUpdateRequestStream',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdateRequest.FromString,
)
self.ModelUpdateStream = channel.unary_stream(
'/grpc.Combiner/ModelUpdateStream',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdate.FromString,
)
self.ModelValidationRequestStream = channel.unary_stream(
'/grpc.Combiner/ModelValidationRequestStream',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelValidationRequest.FromString,
)
self.ModelValidationStream = channel.unary_stream(
'/grpc.Combiner/ModelValidationStream',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelValidation.FromString,
)
self.SendModelUpdateRequest = channel.unary_unary(
'/grpc.Combiner/SendModelUpdateRequest',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdateRequest.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.Response.FromString,
)
self.SendModelUpdate = channel.unary_unary(
'/grpc.Combiner/SendModelUpdate',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdate.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.Response.FromString,
)
self.SendModelValidationRequest = channel.unary_unary(
'/grpc.Combiner/SendModelValidationRequest',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ModelValidationRequest.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.Response.FromString,
)
self.SendModelValidation = channel.unary_unary(
'/grpc.Combiner/SendModelValidation',
request_serializer=fedn_dot_proto_dot_alliance__pb2.ModelValidation.SerializeToString,
response_deserializer=fedn_dot_proto_dot_alliance__pb2.Response.FromString,
)
class CombinerServicer(object):
"""Missing associated documentation comment in .proto file."""
def ModelUpdateRequestStream(self, request, context):
"""Stream endpoints for training/validation pub/sub
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelUpdateStream(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelValidationRequestStream(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModelValidationStream(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendModelUpdateRequest(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendModelUpdate(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendModelValidationRequest(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendModelValidation(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CombinerServicer_to_server(servicer, server):
rpc_method_handlers = {
'ModelUpdateRequestStream': grpc.unary_stream_rpc_method_handler(
servicer.ModelUpdateRequestStream,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdateRequest.SerializeToString,
),
'ModelUpdateStream': grpc.unary_stream_rpc_method_handler(
servicer.ModelUpdateStream,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdate.SerializeToString,
),
'ModelValidationRequestStream': grpc.unary_stream_rpc_method_handler(
servicer.ModelValidationRequestStream,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.ModelValidationRequest.SerializeToString,
),
'ModelValidationStream': grpc.unary_stream_rpc_method_handler(
servicer.ModelValidationStream,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.ModelValidation.SerializeToString,
),
'SendModelUpdateRequest': grpc.unary_unary_rpc_method_handler(
servicer.SendModelUpdateRequest,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdateRequest.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.Response.SerializeToString,
),
'SendModelUpdate': grpc.unary_unary_rpc_method_handler(
servicer.SendModelUpdate,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelUpdate.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.Response.SerializeToString,
),
'SendModelValidationRequest': grpc.unary_unary_rpc_method_handler(
servicer.SendModelValidationRequest,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelValidationRequest.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.Response.SerializeToString,
),
'SendModelValidation': grpc.unary_unary_rpc_method_handler(
servicer.SendModelValidation,
request_deserializer=fedn_dot_proto_dot_alliance__pb2.ModelValidation.FromString,
response_serializer=fedn_dot_proto_dot_alliance__pb2.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'grpc.Combiner', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Combiner(object):
    """Static convenience stubs for the ``grpc.Combiner`` service.

    Generated gRPC code: each method issues the corresponding RPC through
    the ``grpc.experimental`` single-call API, building an ad-hoc channel
    to *target* instead of requiring a pre-built stub.  Do not hand-edit
    beyond comments -- this file is normally regenerated by protoc.
    """
    @staticmethod
    def ModelUpdateRequestStream(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-stream call to ``/grpc.Combiner/ModelUpdateRequestStream``."""
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelUpdateRequestStream',
            fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.ModelUpdateRequest.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ModelUpdateStream(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-stream call to ``/grpc.Combiner/ModelUpdateStream``."""
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelUpdateStream',
            fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.ModelUpdate.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ModelValidationRequestStream(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-stream call to ``/grpc.Combiner/ModelValidationRequestStream``."""
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelValidationRequestStream',
            fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.ModelValidationRequest.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ModelValidationStream(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-stream call to ``/grpc.Combiner/ModelValidationStream``."""
        return grpc.experimental.unary_stream(request, target, '/grpc.Combiner/ModelValidationStream',
            fedn_dot_proto_dot_alliance__pb2.ClientAvailableMessage.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.ModelValidation.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SendModelUpdateRequest(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-unary call to ``/grpc.Combiner/SendModelUpdateRequest``."""
        return grpc.experimental.unary_unary(request, target, '/grpc.Combiner/SendModelUpdateRequest',
            fedn_dot_proto_dot_alliance__pb2.ModelUpdateRequest.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.Response.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SendModelUpdate(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-unary call to ``/grpc.Combiner/SendModelUpdate``."""
        return grpc.experimental.unary_unary(request, target, '/grpc.Combiner/SendModelUpdate',
            fedn_dot_proto_dot_alliance__pb2.ModelUpdate.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.Response.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SendModelValidationRequest(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-unary call to ``/grpc.Combiner/SendModelValidationRequest``."""
        return grpc.experimental.unary_unary(request, target, '/grpc.Combiner/SendModelValidationRequest',
            fedn_dot_proto_dot_alliance__pb2.ModelValidationRequest.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.Response.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def SendModelValidation(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Unary-unary call to ``/grpc.Combiner/SendModelValidation``."""
        return grpc.experimental.unary_unary(request, target, '/grpc.Combiner/SendModelValidation',
            fedn_dot_proto_dot_alliance__pb2.ModelValidation.SerializeToString,
            fedn_dot_proto_dot_alliance__pb2.Response.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)
| 46.175781
| 114
| 0.682641
| 2,144
| 23,642
| 7.158116
| 0.063433
| 0.05734
| 0.061771
| 0.077214
| 0.840229
| 0.818987
| 0.817293
| 0.771942
| 0.709911
| 0.627484
| 0
| 0.004496
| 0.247441
| 23,642
| 511
| 115
| 46.266145
| 0.858082
| 0.069622
| 0
| 0.586797
| 1
| 0
| 0.08135
| 0.046348
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07824
| false
| 0
| 0.00489
| 0.031785
| 0.136919
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d60e61fd12dc004e952b3c0619d302a2767f5029
| 11,982
|
py
|
Python
|
FirstGame.py
|
JaiDarby/First-Game
|
a892b8653bb1e733d3d67498ad490511e1c8a390
|
[
"MIT"
] | 1
|
2021-11-13T08:52:23.000Z
|
2021-11-13T08:52:23.000Z
|
FirstGame.py
|
JaiDarby/First-Game
|
a892b8653bb1e733d3d67498ad490511e1c8a390
|
[
"MIT"
] | null | null | null |
FirstGame.py
|
JaiDarby/First-Game
|
a892b8653bb1e733d3d67498ad490511e1c8a390
|
[
"MIT"
] | null | null | null |
""""
Welcome to my first offical program
"""
input ("Press enter at any point to continue throughout the game.")
nameuser = input ("What is your name? ")
print ("I wish they paid me enough to care.")
input()
play = input ("Do you want to play a game? [Y/N]: ")
if play.lower() == ("y"):
game = input ("Awesome, what game do you want to play? \n Guess the number [1] \n Mad Lib [2] \n Triva [3] \n: ")
elif play.lower() == ("n"):
sure1 = input ("Are you sure? [Y/N]: ")
if sure1.lower() == ("y"):
print ("Ok, screw you then")
game = "testing"
elif sure1.lower() == ("n"):
play1 = input ("So you do want to play? [Y/N]: ")
if play1.lower() == ("y"):
game = input ("Awesome, what game do you want to play? \n Guess the number [1] \n Mad Lib [2] \n Triva [3] \n: ")
elif play1.lower() == ("n"):
print ('Fine I guess...')
game = "tetsing"
if game == ("1"):
import random
number = random.randint(1, 10)
tries = 1
guess = int (input("I'm thinking of a number between 1-10, guess what it is: " ))
if guess > number:
print ("Lower you moron!")
elif guess < number:
print ("Higher you moron!")
elif guess == number:
print ("You got it first try!!")
while guess != number:
tries += 1
guess = int(input("Try again: "))
if guess < number:
print ("Higher you idiot!")
elif guess > number:
print ("Lower you idiot!")
elif guess == number:
print ("You guessed it! And it only took you", tries, "tries!")
elif game == ("2"):
print ("Awesome! You want to play Mad Libs!")
input ()
print ("This game is simple, just input what is asked")
input ()
noun1 = input("Noun: ")
state = input("State: ")
verb1 = input("Verb (past tense): ")
noun2 = input("Noun: ")
name = input ("Proper name: ")
noun3 = input("Noun: ")
noun4 = input("Noun: ")
body = input("Body Part: ")
adj = input("Adjective: ")
relative = input ("Relative: ")
act = input ("Activity: ")
food = input ("Fast Food Resturant: ")
adj2 = input ("Verb (Past tense): ")
month = input ("Month: ")
verb3 = input ("Verb: ")
noun5 = input ("Noun: ")
verb4 = input ("Verb (Past tense): ")
adj3 = input( "Adjective: ")
verb5 = input ("Verb: ")
obj = input ("Object: ")
noun6 = input("Plural Noun: ")
verb2 = input("Verb( -ing): ")
print ("You're finally done")
input()
seelib = input ('Would you like to see your Mad Lib?[Y/N]')
if seelib.lower() == "y":
print ("A", noun1, "in",state, "was arrested this morning after he", verb1, "in front of", noun2 ,".", name , ", had a history of", verb2 , ", but no one - not even his" , noun3 , "- ever imagined he'd" , verb3 , "with a", noun4, "stuck in his", body, ".", "'I always thought he was", adj + ", but never thought he'd do something like this. Even his", relative, "was surprised.' After a breif", act, "cops followed him to a", food +", where he reportedly", adj2, "in the fry machine. In", month+ ", a woman was charged with a similar crime. But rather than", verb3, "with a", noun5 + ", she", verb4, "with a", adj3, "dog. Either way, we imagine that after witnessing him", verb5, "with a", noun5, "there are probably a whole lot of", noun6, "that are going to need some therapy.")
elif seelib.lower() == "n":
print ("I mean... I guess you wasted all that time for nothing then.")
elif game == ("3"):
score = 0
total = 4
print ("What kind of loser chooses trivia?")
input ()
triviagame = input ("Whatever, what would you like to do trivia about?: \n Math [1] \n Computers [2] \n The Programmer[3] \n :")
if triviagame == ("1"):
print ("Math? Yeah... you really are a nerd")
input()
ans = input ("solve for x: 2x - 4y = 9? \n x = ")
if ans == ("9/2"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
ans = input ("solve for x: 7 - 2 + x = 12 \n x = ")
if ans == ("7"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
ans = input ("What is 20% of 30 dollars? ")
if ans == ("6"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
ans = input ("30 is 60% of what number? ")
if ans == ("50"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
print ("You have finished the math quiz! \n You've answered a total of", score, "questions right!" "\n You got a", score/total*100, "%")
elif triviagame == ("2"):
print ("Computers? Ok nerd.")
input()
ans = input ("What command do you use to output a string in python? " )
if ans == ("print"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What command am is being used to get your answer? ")
if ans == ("input"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What command would you use to declare a variable? ")
if ans == ("="):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What command would you use to determine if a variable is equal to something? ")
if ans == ("=="):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
print ("You have finished the computer quiz! \n You've answered a total of", score, "questions right!" "\n You got a", score/total*100, "%")
elif triviagame == ("3"):
print ("You think you know about the man behind the scenes? Lets find out!")
input()
ans = input ("What is the programmers first name? ")
if ans == ("Jai'Mir"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What month was the programmer born in? ")
if ans.lower == ("may"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("How many siblings does the programmer have? ")
if ans == ("5"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("Does the programmer have a pet [Y/N]? ")
if ans.lower == ("y"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
print ("You have finished the programmer quiz! \n You've answered a total of", score, "questions right!" "\n You got a", score/total*100, "%")
#Do you want to play another game?
input()
playagain = input ("Do you want to play another game? [Y/N]: ")
while playagain == ("y"):
play = input("What would you like to play next? \n Guess the number [1] \n Mad Lib [2] \n Triva [3] \n: ")
if play == ("1"):
import random
number = random.randint(1, 10)
tries = 1
guess = int (input("I'm thinking of a number between 1-10, guess what it is: " ))
if guess > number:
print ("Lower you moron!")
elif guess < number:
print ("Higher you moron!")
elif game == number:
print ("You got it first try!!!")
while guess != number:
tries += 1
guess = int(input("Try again: "))
if guess < number:
print ("Higher you idiot!")
elif guess > number:
print ("Lower you idiot!")
elif guess == number:
print ("You guessed it! And it only took you", tries, "tries!")
playagain = input ("Do you want to play another game? [Y/N]: ")
if play == ("2"):
print ("Awesome! You want to play Mad Libs!")
input ()
print ("This game is simple, just input what is asked")
input ()
noun1 = input("Noun: ")
state = input("State: ")
verb1 = input("Verb (past tense): ")
noun2 = input("Noun: ")
name = input ("Proper name: ")
noun3 = input("Noun: ")
noun4 = input("Noun: ")
body = input("Body Part: ")
adj = input("Adjective: ")
relative = input ("Relative: ")
act = input ("Activity: ")
food = input ("Fast Food Resturant: ")
adj2 = input ("Verb (Past tense): ")
month = input ("Month: ")
verb3 = input ("Verb: ")
noun5 = input ("Noun: ")
verb4 = input ("Verb (Past tense): ")
adj3 = input( "Adjective: ")
verb5 = input ("Verb: ")
obj = input ("Object: ")
noun6 = input("Plural Noun: ")
verb2 = input("Verb( -ing): ")
print ("You're finally done")
input()
seelib = input ('Would you like to see your Mad Lib?[Y/N]')
if seelib.lower() == "y":
print ("A", noun1, "in",state, "was arrested this morning after he", verb1, "in front of", noun2 ,".", name , ", had a history of", verb2 , ", but no one - not even his" , noun3 , "- ever imagined he'd" , verb3 , "with a", noun4, "stuck in his", body, ".", "'I always thought he was", adj + ", but never thought he'd do something like this. Even his", relative, "was surprised.' After a breif", act, "cops followed him to a", food +", where he reportedly", adj2, "in the fry machine. In", month+ ", a woman was charged with a similar crime. But rather than", verb3, "with a", noun5 + ", she", verb4, "with a", adj3, "dog. Either way, we imagine that after witnessing him", verb5, "with a", noun5, "there are probably a whole lot of", noun6, "that are going to need some therapy.")
elif seelib.lower() == "n":
print ("I mean... I guess you wasted all that time for nothing then.")
playagain = input ("Do you want to play another game? [Y/N]: ")
if play == ("3"):
score = 0
total = 4
print ("What kind of loser chooses trivia?")
input ()
triviagame = input ("Whatever, what would you like to do trivia about?: \n Math [1] \n Computers [2] \n The Programmer[3] \n :")
if triviagame == ("1"):
print ("Math? Yeah... you really are a nerd")
input()
ans = input ("solve for x: 2x - 4y = 9? \n x = ")
if ans == ("9/2"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
ans = input ("solve for x: 7 - 2 + x = 12 \n x = ")
if ans == ("7"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
ans = input ("What is 20% of 30 dollars? ")
if ans == ("6"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
ans = input ("30 is 60% of what number? ")
if ans == ("50"):
print ("Correct!")
score += 1
else:
print ("You're an idiot!")
print ("You have finished the math quiz! \n You've answered a total of", score, "questions right!" "\n You got a", score/total*100, "%")
elif triviagame == ("2"):
print ("Computers? Ok nerd.")
input()
ans = input ("What command do you use to output a string in python? " )
if ans == ("print"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What command am is being used to get your answer? ")
if ans == ("input"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What command would you use to declare a variable? ")
if ans == ("="):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What command would you use to determine if a variable is equal to something? ")
if ans == ("=="):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
print ("You have finished the computer quiz! \n You've answered a total of", score, "questions right!" "\n You got a", score/total*100, "%")
elif triviagame == ("3"):
print ("You think you know about the man behind the scenes? Lets find out!")
input()
ans = input ("What is the programmers first name? ")
if ans == ("Jai'Mir"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("What month was the programmer born in? ")
if ans.lower == ("may"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("How many siblings does the programmer have? ")
if ans == ("5"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
ans = input ("Does the programmer have a pet [Y/N]? ")
if ans.lower == ("y"):
print ("Correct!")
score += 1
else:
print ("No, you idiot.")
print ("You have finished the programmer quiz! \n You've answered a total of", score, "questions right!" "\n You got a", score/total*100, "%")
playagain = input ("Do you want to play another game? [Y/N]: ")
if playagain == ("n"):
print ("Thank you for playing", nameuser + "!")
| 34.332378
| 783
| 0.590636
| 1,790
| 11,982
| 3.953631
| 0.150279
| 0.029391
| 0.057652
| 0.061043
| 0.940088
| 0.935849
| 0.932316
| 0.928642
| 0.928642
| 0.928642
| 0
| 0.021337
| 0.237273
| 11,982
| 348
| 784
| 34.431034
| 0.753036
| 0.027959
| 0
| 0.908228
| 0
| 0.015823
| 0.484495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006329
| 0
| 0.006329
| 0.28481
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c3a6c504d67b753d8523d07ff72bd8f84c8d8f39
| 17,936
|
py
|
Python
|
class.py
|
SoulFire2879/autopro
|
5cb6495db95ab3b1a67df25974e61ced907011ed
|
[
"MIT"
] | null | null | null |
class.py
|
SoulFire2879/autopro
|
5cb6495db95ab3b1a67df25974e61ced907011ed
|
[
"MIT"
] | null | null | null |
class.py
|
SoulFire2879/autopro
|
5cb6495db95ab3b1a67df25974e61ced907011ed
|
[
"MIT"
] | null | null | null |
import time
from links import *
from info import *
from join import *
from datetime import datetime
from datetime import date
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
# Display name that gets typed into the Meet chat after joining each class.
name = ('ur name lol')
def joinenglish():
    """Join the English class Meet and type ``name`` into the chat.

    Relies on module-level globals: ``driver`` (Chrome WebDriver created at
    the bottom of this file), ``english`` (meeting URL from ``links``) and
    ``name``.  XPaths are hard-coded against Meet's generated DOM.
    """
    driver.get(english)
    time.sleep ( 5 )
    # First two clicks toggle controls on the pre-join screen, third click
    # joins -- presumably mic/camera toggles; TODO confirm on the live DOM.
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div' ).click()
    time.sleep ( 1 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click()
    print ( 'English class has been joined successfully' )
    time.sleep(6)
    # In-call: open the chat panel, type the attendance name, press send.
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span').click()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys ( name )
    time.sleep(3)
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]').click()
def joinmath():
    """Join the Maths class Meet and type ``name`` into the chat.

    Uses module-level globals ``driver``, ``math`` (meeting URL) and ``name``.
    """
    driver.get(math)
    time.sleep ( 8 )
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div' ).click ()
    time.sleep ( 1 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click ()
    print ( 'Maths class has been joined successfully' )
    time.sleep ( 6 )
    # In-call: open chat, type the attendance name, press send.
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys(name)
    time.sleep ( 3 )
    driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]' ).click ()
def joinhistory():
    """Join the History/Civics class Meet and type ``name`` into the chat.

    Uses module-level globals ``driver``, ``history`` (meeting URL) and ``name``.
    """
    driver.get(history)
    time.sleep ( 8 )
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div' ).click ()
    time.sleep ( 1 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click()
    print ( 'History/Civics class has been joined successfully' )
    time.sleep ( 6 )
    # In-call: open chat, type the attendance name, press send.
    driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys (name )
    time.sleep ( 3 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]' ).click ()
def joinphy():
    """Join the Physics class Meet and type ``name`` into the chat.

    Uses module-level globals ``driver``, ``physics`` (meeting URL) and ``name``.
    """
    driver.get(physics)
    time.sleep(8)
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div' ).click ()
    time.sleep ( 1 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click ()
    print ( 'Physics class has been joined successfully' )
    time.sleep ( 6 )
    # In-call: open chat, type the attendance name, press send.
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys ( name )
    time.sleep ( 3 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]' ).click ()
def joinmal():
    """Join the Malayalam class Meet and type ``name`` into the chat.

    Uses module-level globals ``driver``, ``malayalam`` (meeting URL) and ``name``.
    """
    driver.get(malayalam)
    time.sleep ( 8 )
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath ( '//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div' ).click ()
    time.sleep ( 1 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click ()
    print ( 'Malayalam class has been joined successfully' )
    time.sleep ( 6 )
    # In-call: open chat, type the attendance name, press send.
    driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys (name )
    time.sleep ( 3 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]').click ()
def joinbio():
    """Join the Biology class Meet and type ``name`` into the chat.

    Uses module-level globals ``driver``, ``biology`` (meeting URL) and ``name``.
    """
    driver.get(biology)
    time.sleep ( 8 )
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div' ).click ()
    time.sleep ( 1 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click ()
    print ( 'Biology class has been joined successfully' )
    time.sleep ( 6 )
    # In-call: open chat, type the attendance name, press send.
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys (name )
    time.sleep ( 3 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]' ).click ()
def joinchem():
    """Join the Chemistry class Meet and type ``name`` into the chat.

    Uses module-level globals ``driver``, ``chem`` (meeting URL) and ``name``.
    """
    driver.get(chem)
    time.sleep ( 8 )
    # NOTE(review): this first XPath is missing one ``[1]`` segment compared
    # to the identical step in every sibling join function -- verify it is
    # intentional for this meeting page and not a transcription slip.
    driver.find_element_by_xpath ( '//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div/div[3]/div[2]/div/div' ).click ()
    time.sleep ( 1 )
    driver.find_element_by_xpath ( '//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click ()
    time.sleep ( 3 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click ()
    print ( 'Chemistry class has been joined successfully' )
    time.sleep ( 6 )
    # In-call: open chat, type the attendance name, press send.
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys ( name )
    time.sleep ( 3 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]' ).click ()
def joinhindi():
    """Join the Hindi class Meet and type ``name`` into the chat.

    Fix: the success message was printed twice (once after opening the chat
    panel and again at the end of the function); it is now printed once,
    matching every other ``join*`` helper in this file.

    Uses module-level globals ``driver``, ``hindi`` (meeting URL) and ``name``.
    """
    driver.get(hindi)
    time.sleep(8)
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div').click()
    time.sleep(1)
    driver.find_element_by_xpath('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div').click()
    time.sleep(2)
    driver.find_element_by_xpath('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]').click()
    time.sleep(6)
    # In-call: open the chat panel, type the attendance name, press send.
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span').click()
    print('Hindi class has been joined successfully')
    time.sleep(2)
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea').send_keys(name)
    time.sleep(3)
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]').click()
def joinface():
    """Join the face-to-face/Facetime session Meet and type ``name`` into the chat.

    Fix: the success message read "Facetime c has been joined successfully";
    corrected to "Facetime class ..." for consistency with the other helpers.

    Uses module-level globals ``driver``, ``face`` (meeting URL) and ``name``.
    """
    driver.get(face)
    time.sleep(8)
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div').click()
    time.sleep(1)
    driver.find_element_by_xpath('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div').click()
    time.sleep(2)
    driver.find_element_by_xpath('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]').click()
    print('Facetime class has been joined successfully')
    time.sleep(6)
    # In-call: open the chat panel, type the attendance name, press send.
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span').click()
    time.sleep(2)
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea').send_keys(name)
    time.sleep(3)
    driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]').click()
def joingeo():
    """Join the Geography class Meet and type ``name`` into the chat.

    Uses module-level globals ``driver``, ``geo`` (meeting URL) and ``name``.
    """
    driver.get(geo)
    time.sleep(8)
    # Pre-join screen: two toggle clicks, then the join button.
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div/div' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div/div/div' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ( '//*[@id="yDmH0d"]/c-wiz/div/div/div[8]/div[3]/div/div/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[1]' ).click ()
    print ( 'Geography class has been joined successfully' )
    time.sleep ( 6 )
    # In-call: open chat, type the attendance name, press send.
    driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[6]/div[3]/div/div[2]/div[3]/span/span/div/div/span' ).click ()
    time.sleep ( 2 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[1]/div[1]/div[2]/textarea' ).send_keys(name)
    time.sleep ( 3 )
    driver.find_element_by_xpath ('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[3]/div/div[2]/div[2]/div[2]/span[2]/div/div[3]/div[2]' ).click ()
# --- Browser setup and Google sign-in ---------------------------------------
chrome_options = webdriver.ChromeOptions()
# Chrome content-setting codes: 1 = allow, 2 = block.  Auto-allow mic and
# camera so Meet never prompts; block notification pop-ups.
prefs = {"profile.default_content_setting_values.media_stream_mic" : 1,
"profile.default_content_setting_values.notifications": 2,
"profile.default_content_setting_values.media_stream_camera": 1}
chrome_options.add_experimental_option("prefs",prefs)
# Hard-coded chromedriver location; must match the installed Chrome version.
path = r'C:\Users\User\Documents\chromedriver_win32 (3)\chromedriver.exe'
driver = webdriver.Chrome(path, chrome_options = chrome_options)
#this part signs into google
driver.maximize_window()
# Sign in via the OAuth playground chooser so the browser session carries
# Google credentials for Meet.
driver.get('https://accounts.google.com/o/oauth2/v2/auth/oauthchooseaccount?redirect_uri=https%3A%2F%2Fdevelopers.google.com%2Foauthplayground&prompt=consent&response_type=code&client_id=407408718192.apps.googleusercontent.com&scope=email&access_type=offline&flowName=GeneralOAuthFlow')
time.sleep(3)
driver.find_element_by_xpath('//*[@id="yDmH0d"]').click()
# ``email`` and ``password`` come from the wildcard import of ``info``.
driver.find_element_by_xpath('//input[@type="email"]').send_keys(email)
driver.find_element_by_xpath('//*[@id="identifierNext"]').click()
time.sleep(3)
driver.find_element_by_xpath('//input[@type="password"]').send_keys(password)
driver.find_element_by_xpath('//*[@id="passwordNext"]').click()
time.sleep(2)
e = datetime.now()
day = e.strftime ( "%A" )  # full weekday name, e.g. "Monday"
print ( day )
if day == "Monday":
joinmath()
time.sleep(3)
driver.find_element_by_xpath('//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div').click()
print('Maths class has been left successfully')
joinphy()
time.sleep(3)
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click()
print ( 'Physics class has been left successfully' )
joinhistory()
time.sleep (3)
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click()
print ( 'History class has been left successfully' )
joinenglish()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'English class has been left successfully' )
elif day == "Tuesday":
joinmath()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinhistory()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinmal()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinbio()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
elif day == "Wednesday":
joinchem()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinmath()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinenglish()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinhindi()
time.sleep ( 3000 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
elif day == "Thursday":
joinmath()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinface()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinenglish()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinmal()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
elif day == "Friday":
joinmath()
time.sleep (3)
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joingeo()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinmal()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
joinphy()
time.sleep ( 3 )
driver.find_element_by_xpath ( '//*[@id="ow3"]/div[1]/div/div[8]/div[3]/div[9]/div[2]/div[2]/div' ).click ()
print ( 'Maths class has been left successfully' )
elif day == "Saturday":
print('its saturday bro lmao ')
elif day == 'Sunday':
print('Its sunday bro lmao')
| 59.194719
| 287
| 0.616024
| 3,225
| 17,936
| 3.335504
| 0.048062
| 0.173468
| 0.116482
| 0.150135
| 0.878405
| 0.875244
| 0.867342
| 0.851817
| 0.839732
| 0.836107
| 0
| 0.056496
| 0.130575
| 17,936
| 302
| 288
| 59.390728
| 0.633321
| 0.001505
| 0
| 0.722222
| 0
| 0.3
| 0.552715
| 0.45893
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0.007407
| 0.033333
| 0
| 0.07037
| 0.125926
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c3d78c43b802a39aed603a7a1d7c3fd58ff7dae6
| 5,414
|
py
|
Python
|
source/tournament_scenes.py
|
Omni-9/warband_mod_source
|
c9737d7793ccdb185d8d3caedda0da915104e405
|
[
"BSD-Source-Code"
] | 14
|
2018-09-20T23:01:27.000Z
|
2021-05-25T11:05:09.000Z
|
source/tournament_scenes.py
|
Omni-9/warband_mod_source
|
c9737d7793ccdb185d8d3caedda0da915104e405
|
[
"BSD-Source-Code"
] | 44
|
2018-09-15T03:05:50.000Z
|
2022-03-22T02:46:24.000Z
|
source/tournament_scenes.py
|
Omni-9/warband_mod_source
|
c9737d7793ccdb185d8d3caedda0da915104e405
|
[
"BSD-Source-Code"
] | 13
|
2018-10-02T11:45:24.000Z
|
2021-08-22T18:41:44.000Z
|
# Tournament Play Enhancements (1.5) by Windyplains
from header_common import *
from header_operations import *
from header_triggers import *
from header_scenes import *
from module_constants import *
# Alternate arena scene records appended to the game's master scene list.
# Each record follows the standard module_scenes tuple layout: scene id,
# flags, inner/outer mesh names, min pos (x,y), max pos (x,y), water level,
# terrain seed code string, passages, chest troops, and an optional trailing
# outer-terrain mesh name -- see the Warband module-system documentation for
# the exact field order (TODO confirm against header_scenes for this mod).
scenes = [
# ARENA OVERHAUL MOD SCENES to be used with TOURNAMENT PLAY ENHANCEMENTS - Windyplains
# Credit for scenes: Adorno
("town_1_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_thir_new" ),
("town_2_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_thir_new" ),
("town_3_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain_farmountain" ),
("town_4_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain" ),
("town_5_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain_farmountain" ),
("town_6_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain" ),
("town_7_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain" ),
("town_8_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain_farmountain" ),
# Snow / steppe / desert towns use different terrain seed codes below.
("town_9_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x40001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_snow_farmountain" ),
("town_10_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_steppe" ),
("town_11_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x40001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_snow" ),
("town_12_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_thir_new" ),
# NOTE(review): town_13 is the only record without a trailing outer-terrain
# name -- confirm the omission is intentional and not a dropped field.
("town_13_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ),
("town_14_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_steppe" ),
("town_15_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain" ),
("town_16_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0xa0001d9300031ccb0000156f000048ba0000361c" ,[] ,[] ,"outer_terrain_plain" ),
("town_17_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_steppe" ),
("town_18_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_steppe" ),
("town_19_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_desert" ),
("town_20_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_desert" ),
("town_21_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_desert" ),
("town_22_arena_alternate" ,sf_generate ,"none" ,"none" ,(0,0) ,(100,100) ,-100 ,"0x00000002200005000005f57b00005885000046bd00006d9c" ,[] ,[] ,"outer_terrain_desert" ),
]
# Used by modmerger framework version >= 200 to merge stuff
def modmerge(var_set):
    """Append this file's `scenes` records to the mod's master scene list.

    Called by the modmerger framework (version >= 200); `var_set` maps
    module-variable names to the objects being merged.

    Raises:
        ValueError: if `var_set` has no "scenes" entry (chained from the
            underlying KeyError so the failing lookup stays visible).
    """
    var_name_1 = "scenes"
    # Keep the try body minimal: only the lookup can raise KeyError.
    try:
        orig_scenes = var_set[var_name_1]
    except KeyError as err:
        errstring = "Variable set does not contain expected variable: \"%s\"." % var_name_1
        raise ValueError(errstring) from err
    orig_scenes.extend(scenes)
| 120.311111
| 219
| 0.573144
| 468
| 5,414
| 6.305556
| 0.192308
| 0.089461
| 0.119282
| 0.178922
| 0.815995
| 0.815995
| 0.815995
| 0.815995
| 0.815995
| 0.815995
| 0
| 0.277093
| 0.280754
| 5,414
| 45
| 220
| 120.311111
| 0.48074
| 0.040266
| 0
| 0
| 0
| 0
| 0.419877
| 0.322419
| 0
| 0
| 0.190293
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.135135
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c3d9df4a7cb2ea9e9d20fdab805662f1d7da264e
| 15,556
|
py
|
Python
|
test.py
|
akashmittal18/Twitter-Sentimental-Analysis-
|
68eb06c778c9d512d6da9da5c657a510913bc2c1
|
[
"MIT"
] | null | null | null |
test.py
|
akashmittal18/Twitter-Sentimental-Analysis-
|
68eb06c778c9d512d6da9da5c657a510913bc2c1
|
[
"MIT"
] | null | null | null |
test.py
|
akashmittal18/Twitter-Sentimental-Analysis-
|
68eb06c778c9d512d6da9da5c657a510913bc2c1
|
[
"MIT"
] | 2
|
2020-10-02T18:55:37.000Z
|
2020-10-18T10:59:42.000Z
|
# NOTE(review): this region contained an unresolved git merge conflict
# (<<<<<<< HEAD ... ======= ... >>>>>>> a8eac89...) whose two sides were
# byte-identical; the markers were removed and a single copy kept.

# It will import all the modules stored in AllImport module
from AllImport import *
# Use hashtag and classify in % how many sentiments are +ve and -ve based on fetched tweets
from tkinter import *
import time


class TwitterClient():
    """Thin wrapper around the tweepy API, optionally bound to one user.

    Relies on names star-imported from AllImport (API, Cursor, ...).
    """

    def __init__(self, twitter_user=None):
        self.auth = TwitterAuthenticator().authenticate_twitter_app()
        self.twitter_client = API(self.auth)
        self.twitter_user = twitter_user

    def get_twitter_client_api(self):
        """Return the underlying authenticated API object."""
        return self.twitter_client

    def get_user_timeline_tweets(self, num_tweets):
        """Return up to `num_tweets` tweets from the bound user's timeline."""
        tweets = []
        for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):
            tweets.append(tweet)
        return tweets

    def get_friend_list(self, num_friends):
        """Return up to `num_friends` friends of the bound user."""
        friend_list = []
        for friend in Cursor(self.twitter_client, id=self.twitter_user).items(num_friends):
            friend_list.append(friend)
        return friend_list

    def get_home_timeline_tweets(self, num_tweets):
        """Return up to `num_tweets` tweets from the home timeline."""
        home_timeline_tweets = []
        for tweet in Cursor(self.twitter_client.home_timeline, id=self.twitter_user).items(num_tweets):
            home_timeline_tweets.append(tweet)
        return home_timeline_tweets


# To authenticate and access the twitter
class TwitterAuthenticator():
    def authenticate_twitter_app(self):
        """Build an OAuthHandler from the keys in twitter_credentials."""
        auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)
        auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)
        return auth


# get all the data of the tweets and,pass only tweets text to preprocess and finally returns only the processed tweets
def process(data):
    """Pre-process every entry of data['sentence'] in place and return it."""
    temp = []
    for text in data['sentence']:
        text = pp.pre_processing(text)
        temp.append(text)
    data['sentence'] = temp
    return data['sentence']


def execute():
    """Fetch tweets for the user named in the UI, train the classifiers on
    Train.csv, and list each classifier's verdict per tweet in the listbox.

    Reads the module-level Tk widgets Entry1, w and msg_list; any failure is
    reported via print (best-effort GUI callback).
    """
    try:
        user = Entry1.get()
        num_tweets = w.get()
        twitter_client = TwitterClient()
        api = twitter_client.get_twitter_client_api()
        tweets = api.user_timeline(screen_name=user, count=num_tweets)
        tweets_text = []
        for tweet in tweets:
            tweets_text.append(pp.pre_processing(tweet.text))
        datafile = pd.read_csv('Train.csv', sep=',', encoding="utf-8")
        x = process(datafile)
        y = datafile['label']
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
        vector = CountVectorizer()
        vector.fit(x_train)
        x_train_vft = vector.transform(x_train)
        x_test_vft = vector.transform(x_test)
        count = 1
        # NOTE(review): indentation was lost upstream; the classifiers below
        # are placed inside the per-tweet loop because they all consume `vec`,
        # which is rebuilt from the loop variable -- confirm against history.
        for tweet in tweets_text:
            tweet_text = str(count)+":- "+tweet
            msg_list.insert(END,tweet_text)
            # print(count, tweet)
            count += 1
            tweet = [tweet]
            vec = vector.transform(tweet)
            # Multinomial Naive Bayes-Every feature is independent,probability is cal and highest one will be o/p,fastest
            temp = mnb.MultinomialNBAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"Multinomial Naive Bayes")
            msg_list.insert(END,temp)
            """
            Regression analysis is an important tool for modelling and analyzing data. Here, we fit a curve/line to the data
            Points,in such a manner that the differences b/w the distances of data points from the curve/line is minimized.
            a topic of some context.Ex:context:-road accident,topic:-car accident,it can happen or not happen
            """
            """
            Logistic Regression-It can give a binary or multi result(positive/negative/neutral),has a range 0 to 1
            # used for category data.Its has a curve.3 Types
            # lbgfs or lmbgfs is Limited memory Broyden–Fletcher–Goldfarb–Shanno Algo.Memory optimization algo
            # newton-cg:- newton's method for Large Bound-Constrained Optimization
            # multi-calss tells which logistic regression is being used
            """
            # 1:-OrdinalLogisticRegression not used because it takes at lest 3 categories but we have 2,+ve and -ve
            # 2:-Multinomial Logistic Regression-Used for 2 or more category,vision-shortsight,longsight,perfect
            temp = mlr.MultinomialLRAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"Multinomial Logistic Regression")
            msg_list.insert(END,temp)
            # 3:-Binary logistic regression-Used for 2 category,good,bad
            temp = blr.BinomialLRAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"Binary logistic regression")
            msg_list.insert(END,temp)
            # LinearRegression-find optimal line b/w the 2 data,where one data is independent(text),and other is dependent
            # (type-pos/neg) on another
            # lr.LinearRegressionAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            """
            SVM(support vector machine)-takes data as i/p and o/p a line that separates those classes[pos/neg] if possible
            we find the points closest to the line from both the classes.These points are called support vectors.we compute
            the distance between the line and the support vectors. This distance is called the margin. Our goal is to
            maximize the margin. The hyperplane for which the margin is maximum is the optimal hyperplane.Thus SVM tries to
            make a decision boundary in such a way that the separation between the two classes(that street) is as wide as
            possible
            """
            # Linear Classifier
            temp = lc.LinearClassifierAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"Linear Classifier")
            msg_list.insert(END,temp)
            # LinearSupportVectorClassifier-LinearSeparationOfDataHappensOptimalLineIsDrawn using margins b/w both data
            temp = lsvc.LinearSupportVectorClassifierAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"LinearSupportVectorClassifier")
            msg_list.insert(END,temp)
            # Decision Tree Classifier
            temp = dtc.DecisionTreeClassifierAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"Decision Tree Classifier")
            msg_list.insert(END,temp)
            # Random Forest classifier
            temp = rfc.RandomForestClassifierAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"Random Forest classifier")
            msg_list.insert(END,temp)
            # Extra Trees Classifier
            temp = etc.ExtraTreesClassifierAlgo(x_train_vft, y_train, x_test_vft, y_test, vec)
            msg_list.insert(END,"Extra Trees Classifier")
            msg_list.insert(END,temp)
            # Blank separator row between tweets.
            msg_list.insert(END," ")
    except Exception as e:
        # Print the error
        print(e)


# NOTE(review): the two defs below take `self` yet sat at module level in the
# original; they look like stream-listener callbacks pasted outside a class.
# Kept at module level unchanged (nothing here calls them) -- confirm intent.
# When reach the rate limit
def on_limit(self, track):
    # Print rate limiting error
    print("Rate limited, continuing")
    # Continue mining tweets
    return True


# When timed out
def on_timeout(self):
    # Print timeout message
    print(sys.stderr, 'Timeout')
    # Wait 10 seconds
    time.sleep(10)
    # Return nothing
    return


if __name__ == "__main__":
    # Build the Tk UI: banner, logo, results listbox, username entry,
    # tweet-count slider, and a RUN button wired to execute().
    mainwindow = Tk()
    mainwindow.title("Twitter Sentimental Analysis Engine")
    Label(mainwindow, text="TWITTER SENTIMENTAL ANALYSIS ENGINE", bg="black", fg="white").pack(side=TOP, fill=X, padx=2, pady=2)
    photo = PhotoImage(file="Twitterlogo.png")
    Label(mainwindow, image=photo, bg="black", fg="white").pack(side=TOP, fill=X)
    messages_frame = Frame(mainwindow)
    scrollbar = Scrollbar(messages_frame)  # To navigate through past messages.
    # Following will contain the messages.
    msg_list = Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
    scrollbar.pack(side=RIGHT, fill=Y,padx=2,pady=2)
    msg_list.pack(side=LEFT, fill=BOTH,padx=2,pady=2)
    msg_list.pack(padx=2,pady=2)
    messages_frame.pack()
    Label(mainwindow, text="USERNAME", bg="black", fg="white").pack(side=TOP, fill=X, padx=2, pady=2)
    Entry1 = Entry(mainwindow)
    Entry1.pack(side=TOP, padx=2, pady=2)
    Label(mainwindow, text="NUMBER OF TWEETS", bg="black", fg="white").pack(side=TOP, fill=X, padx=2, pady=2)
    w = Scale(mainwindow, from_=1, to=10, orient=HORIZONTAL)
    w.pack(side=TOP, fill=X, padx=2, pady=2)
    But1 = Button(mainwindow, text="RUN", command=execute)
    But1.pack(side=TOP, fill=X, padx=2, pady=2)
    mainwindow.mainloop()
| 39.482234
| 125
| 0.749936
| 2,348
| 15,556
| 4.818569
| 0.154174
| 0.025986
| 0.041365
| 0.05091
| 0.99452
| 0.99452
| 0.99452
| 0.99452
| 0.99452
| 0.99452
| 0
| 0.009297
| 0.149524
| 15,556
| 394
| 126
| 39.482234
| 0.845427
| 0.170417
| 0
| 0.983193
| 0
| 0
| 0.085437
| 0.005696
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.02521
| null | null | 0.02521
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7f22c865b41c252e003c76f398e03e1e956bba1d
| 24,430
|
py
|
Python
|
api/tests/test_container.py
|
tombh/deis
|
f98fd5e43acfa13c0780c25cfd40dd34d4d1bcc0
|
[
"Apache-2.0"
] | 1
|
2016-05-28T08:44:13.000Z
|
2016-05-28T08:44:13.000Z
|
api/tests/test_container.py
|
tombh/deis
|
f98fd5e43acfa13c0780c25cfd40dd34d4d1bcc0
|
[
"Apache-2.0"
] | null | null | null |
api/tests/test_container.py
|
tombh/deis
|
f98fd5e43acfa13c0780c25cfd40dd34d4d1bcc0
|
[
"Apache-2.0"
] | null | null | null |
"""
Unit tests for the Deis api app.
Run the tests with "./manage.py test api"
"""
from __future__ import unicode_literals
import json
from django.test import TestCase
from django.test.utils import override_settings
from api.models import Container
from deis import settings
def get_allocations(container_dict):
    """Tally how many containers landed on each node and return the per-node
    counts in ascending order.

    `container_dict` maps container keys to "node_name:container_id" strings;
    only the node name (before the colon) is counted.  A value without
    exactly one colon raises ValueError, as before.
    """
    per_node = {}
    for allocation in container_dict.values():
        node, _container_id = allocation.split(':')
        per_node[node] = per_node.get(node, 0) + 1
    return sorted(per_node.values())
@override_settings(CELERY_ALWAYS_EAGER=True)
class ContainerTest(TestCase):
"""Tests creation of containers on nodes"""
fixtures = ['tests.json']
# Build the shared fixture state every test needs: log in, register a mock
# cloud provider and a flavor, create the 'autotest' formation, add proxy and
# runtime layers, and scale them to 2 proxies / 4 runtime nodes.  Every POST
# is asserted so a broken fixture fails fast in setUp rather than mid-test.
def setUp(self):
# Credentials come from Django settings when present; otherwise dummy
# 'xxx...' keys are used -- fine because the provider type is 'mock'.
self.assertTrue(
self.client.login(username='autotest', password='password'))
url = '/api/providers'
creds = {'access_key': getattr(settings, 'EC2_ACCESS_KEY', 'x' * 32),
'secret_key': getattr(settings, 'EC2_SECRET_KEY', 'x' * 64)}
body = {'id': 'autotest', 'type': 'mock', 'creds': json.dumps(creds)}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
# Flavor tied to the mock provider (region/instance size are inert here).
url = '/api/flavors'
body = {'id': 'autotest', 'provider': 'autotest',
'params': json.dumps({'region': 'us-west-2', 'instance_size': 'm1.medium'})}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client.post('/api/formations', json.dumps(
{'id': 'autotest', 'domain': 'localhost.localdomain'}),
content_type='application/json')
self.assertEqual(response.status_code, 201)
# create & scale a basic formation
formation_id = 'autotest'
url = '/api/formations/{formation_id}/layers'.format(**locals())
body = {'id': 'proxy', 'flavor': 'autotest', 'proxy': True,
'run_list': 'recipe[deis::proxy]'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
url = '/api/formations/{formation_id}/layers'.format(**locals())
body = {'id': 'runtime', 'flavor': 'autotest', 'runtime': True,
'run_list': 'recipe[deis::runtime]'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
# Scale the formation so the tests have nodes to place containers on.
url = '/api/formations/{formation_id}/scale'.format(**locals())
body = {'proxy': 2, 'runtime': 4}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
def test_container(self):
    """Exercise the container lifecycle: create an app, scale up, inspect
    containers individually, scale down, and finally scale to zero,
    checking the reported counts and app state at each step."""
    # Create an app on the 'autotest' formation built in setUp.
    url = '/api/apps'
    body = {'formation': 'autotest'}
    response = self.client.post(url, json.dumps(body), content_type='application/json')
    self.assertEqual(response.status_code, 201)
    app_id = response.data['id']
    # should start with zero
    url = "/api/apps/{app_id}/containers".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.data['results']), 0)
    # scale up
    url = "/api/apps/{app_id}/scale".format(**locals())
    body = {'web': 4, 'worker': 2}
    response = self.client.post(url, json.dumps(body), content_type='application/json')
    self.assertEqual(response.status_code, 200)
    url = "/api/apps/{app_id}/containers".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.data['results']), 6)
    # The app's 'containers' field should mirror the requested scale.
    url = "/api/apps/{app_id}".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data['containers'], json.dumps(body))
    # test listing/retrieving container info
    url = "/api/apps/{app_id}/containers/web".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.data['results']), 4)
    num = response.data['results'][0]['num']
    url = "/api/apps/{app_id}/containers/web/{num}".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data['num'], num)
    # scale down
    url = "/api/apps/{app_id}/scale".format(**locals())
    body = {'web': 2, 'worker': 1}
    response = self.client.post(url, json.dumps(body), content_type='application/json')
    # Fix: the original never checked this response; assert 200 like every
    # other scale call so a failed scale-down cannot pass silently.
    self.assertEqual(response.status_code, 200)
    url = "/api/apps/{app_id}/containers".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.data['results']), 3)
    url = "/api/apps/{app_id}".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data['containers'], json.dumps(body))
    # scale down to 0
    url = "/api/apps/{app_id}/scale".format(**locals())
    body = {'web': 0, 'worker': 0}
    response = self.client.post(url, json.dumps(body), content_type='application/json')
    self.assertEqual(response.status_code, 200)
    url = "/api/apps/{app_id}/containers".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(response.data['results']), 0)
    url = "/api/apps/{app_id}".format(**locals())
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data['containers'], json.dumps(body))
def test_container_errors(self):
url = '/api/apps'
body = {'formation': 'autotest'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 'not_an_int'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertContains(response, 'Invalid scaling format', status_code=400)
def test_container_single_layer(self):
# create & scale a single layer formation
response = self.client.post('/api/formations', json.dumps(
{'id': 'single-layer', 'domain': 'localhost.localdomain'}),
content_type='application/json')
self.assertEqual(response.status_code, 201)
formation_id = 'single-layer'
url = '/api/formations/{formation_id}/layers'.format(**locals())
body = {'id': 'default', 'flavor': 'autotest', 'proxy': True, 'runtime': True,
'run_list': 'recipe[deis::runtime],recipe[deis::proxy]'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
url = '/api/formations/{formation_id}/scale'.format(**locals())
body = {'default': 4}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = '/api/apps'
body = {'formation': 'single-layer'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# should start with zero
url = "/api/apps/{app_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 0)
# scale up
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 4, 'worker': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 6)
url = "/api/apps/{app_id}".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['containers'], json.dumps(body))
# scale down
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 2, 'worker': 1}
response = self.client.post(url, json.dumps(body), content_type='application/json')
url = "/api/apps/{app_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 3)
url = "/api/apps/{app_id}".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['containers'], json.dumps(body))
# scale down to 0
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 0, 'worker': 0}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 0)
url = "/api/apps/{app_id}".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['containers'], json.dumps(body))
def test_container_multiple_apps(self):
url = '/api/apps'
body = {'formation': 'autotest'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
app1_id = response.data['id']
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
app2_id = response.data['id']
# scale up
url = "/api/apps/{app1_id}/scale".format(**locals())
body = {'web': 4, 'worker': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app2_id}/scale".format(**locals())
body = {'web': 4, 'worker': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app1_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 6)
url = "/api/apps/{app2_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 6)
# check port assignments
url = '/api/formations/autotest/calculate'
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
databag = response.data.copy()
ports = []
for app in databag['apps'].values():
for containers_set in app['containers'].values():
for node_port in containers_set.values():
_, port = node_port.split(':')
ports.append(int(port))
ports.sort()
self.assertEqual(ports, range(10001, 10013))
# scale down
url = "/api/apps/{app1_id}/scale".format(**locals())
body = {'web': 2, 'worker': 1}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app2_id}/scale".format(**locals())
body = {'web': 2, 'worker': 1}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app1_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 3)
url = "/api/apps/{app2_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 3)
# check port assignments
url = '/api/formations/autotest/calculate'
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
databag = response.data.copy()
ports = []
for app in databag['apps'].values():
for containers_set in app['containers'].values():
for node_port in containers_set.values():
_, port = node_port.split(':')
ports.append(int(port))
ports.sort()
self.assertEqual(len(set(ports)), 6)
def test_container_allocation(self):
url = '/api/apps'
formation_id = 'autotest'
body = {'formation': formation_id}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# With 4 nodes and 13 web containers
url = "/api/formations/{formation_id}/scale".format(**locals())
body = {'runtime': 4}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 13}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# test that one node has 4 and 3 nodes have 3 containers
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(get_allocations(response.data['apps'][app_id]['containers']['web']),
[3, 3, 3, 4])
# With 1 node
url = "/api/formations/{formation_id}/scale".format(**locals())
body = {'runtime': 1}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# test that the node has all 13 containers
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(get_allocations(response.data['apps'][app_id]['containers']['web']),
[13])
# With 2 nodes
url = "/api/formations/{formation_id}/scale".format(**locals())
body = {'runtime': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# test that one has 6 and the other has 7 containers
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(get_allocations(response.data['apps'][app_id]['containers']['web']),
[6, 7])
# With 8 containers
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 8}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# test that both have 4 containers
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(get_allocations(response.data['apps'][app_id]['containers']['web']),
[4, 4])
# With 0 nodes
url = "/api/formations/{formation_id}/scale".format(**locals())
body = {'runtime': 0}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# test that there are no containers
self.assertNotIn('web', response.data['apps'][app_id]['containers'])
# With 5 containers
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 5}
response = self.client.post(url, json.dumps(body), content_type='application/json')
# test that we get an error message about runtime nodes
self.assertEqual(response.status_code, 400)
self.assertIn('No nodes available for containers', response.data)
# With 1 node
url = "/api/formations/{formation_id}/scale".format(**locals())
body = {'runtime': 1}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# test that it gets all 8 containers
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(get_allocations(response.data['apps'][app_id]['containers']['web']),
[8])
def test_container_balance(self):
url = '/api/apps'
formation_id = 'autotest'
body = {'formation': formation_id}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# scale layer
url = '/api/formations/{formation_id}/scale'.format(**locals())
body = {'runtime': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# should start with zero
url = "/api/apps/{app_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 0)
# scale up
url = '/api/apps/{app_id}/scale'.format(**locals())
body = {'web': 8, 'worker': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# scale layer up
url = '/api/formations/{formation_id}/scale'.format(**locals())
body = {'runtime': 4}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# calculate the formation
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url)
containers = response.data['apps'][app_id]['containers']
# check balance of web types
by_backend = {}
for c in containers['web'].values():
backend, port = c.split(':')
by_backend.setdefault(backend, []).append(port)
b_min = min([len(by_backend[b]) for b in by_backend.keys()])
b_max = max([len(by_backend[b]) for b in by_backend.keys()])
self.assertLess(b_max - b_min, 2)
# check balance of worker types
by_backend = {}
for c in containers['worker'].values():
backend, port = c.split(':')
by_backend.setdefault(backend, []).append(port)
b_min = min([len(by_backend[b]) for b in by_backend.keys()])
b_max = max([len(by_backend[b]) for b in by_backend.keys()])
self.assertLess(b_max - b_min, 2)
# scale up more
url = '/api/apps/{app_id}/scale'.format(**locals())
body = {'web': 6, 'worker': 4}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# calculate the formation
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url)
containers = response.data['apps'][app_id]['containers']
# check balance of web types
by_backend = {}
for c in containers['web'].values():
backend, port = c.split(':')
by_backend.setdefault(backend, []).append(port)
b_min = min([len(by_backend[b]) for b in by_backend.keys()])
b_max = max([len(by_backend[b]) for b in by_backend.keys()])
self.assertLess(b_max - b_min, 2)
# check balance of worker types
by_backend = {}
for c in containers['worker'].values():
backend, port = c.split(':')
by_backend.setdefault(backend, []).append(port)
b_min = min([len(by_backend[b]) for b in by_backend.keys()])
b_max = max([len(by_backend[b]) for b in by_backend.keys()])
self.assertLess(b_max - b_min, 2)
# scale down
url = '/api/apps/{app_id}/scale'.format(**locals())
body = {'web': 2, 'worker': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
url = "/api/apps/{app_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 4)
# calculate the formation
url = "/api/formations/{formation_id}/calculate".format(**locals())
response = self.client.post(url)
containers = response.data['apps'][app_id]['containers']
# check balance of web types
by_backend = {}
for c in containers['web'].values():
backend, port = c.split(':')
by_backend.setdefault(backend, []).append(port)
b_min = min([len(by_backend[b]) for b in by_backend.keys()])
b_max = max([len(by_backend[b]) for b in by_backend.keys()])
self.assertLess(b_max - b_min, 2)
# check balance of worker types
by_backend = {}
for c in containers['worker'].values():
backend, port = c.split(':')
by_backend.setdefault(backend, []).append(port)
b_min = min([len(by_backend[b]) for b in by_backend.keys()])
b_max = max([len(by_backend[b]) for b in by_backend.keys()])
self.assertLess(b_max - b_min, 2)
def test_container_str(self):
"""Test the text representation of a container."""
url = '/api/apps'
body = {'formation': 'autotest'}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# scale up
url = "/api/apps/{app_id}/scale".format(**locals())
body = {'web': 4, 'worker': 2}
response = self.client.post(url, json.dumps(body), content_type='application/json')
self.assertEqual(response.status_code, 200)
# should start with zero
url = "/api/apps/{app_id}/containers".format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 6)
uuid = response.data['results'][0]['uuid']
container = Container.objects.get(uuid=uuid)
self.assertEqual(container.short_name(),
"{}.{}".format(container.type, container.num))
self.assertEqual(str(container),
"{} {}".format(container.formation.id, container.short_name()))
| 50.57971
| 93
| 0.617929
| 2,960
| 24,430
| 4.992568
| 0.068581
| 0.102517
| 0.118284
| 0.135404
| 0.875626
| 0.87116
| 0.867709
| 0.859183
| 0.859183
| 0.851401
| 0
| 0.017856
| 0.225174
| 24,430
| 482
| 94
| 50.684647
| 0.762851
| 0.050675
| 0
| 0.810219
| 0
| 0
| 0.174456
| 0.081024
| 0
| 0
| 0
| 0
| 0.270073
| 1
| 0.021898
| false
| 0.002433
| 0.014599
| 0
| 0.043796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
613d93c21a48e8f81e6befea30c6642e340cd035
| 93
|
py
|
Python
|
connectomics/io/__init__.py
|
donglaiw/pytorch_connectomics
|
c79a3cc82f853a86e98930475f6355d0022916dd
|
[
"MIT"
] | 1
|
2020-05-17T08:01:56.000Z
|
2020-05-17T08:01:56.000Z
|
connectomics/io/__init__.py
|
donglaiw/pytorch_connectomics
|
c79a3cc82f853a86e98930475f6355d0022916dd
|
[
"MIT"
] | null | null | null |
connectomics/io/__init__.py
|
donglaiw/pytorch_connectomics
|
c79a3cc82f853a86e98930475f6355d0022916dd
|
[
"MIT"
] | 3
|
2020-03-31T21:40:12.000Z
|
2021-06-09T02:26:43.000Z
|
from .io_file import *
from .io_data import *
from .io_model import *
from .io_args import *
| 18.6
| 23
| 0.741935
| 16
| 93
| 4.0625
| 0.4375
| 0.369231
| 0.553846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172043
| 93
| 4
| 24
| 23.25
| 0.844156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
614b329e1c2c60f647f003ac79698e2e520b5178
| 180
|
py
|
Python
|
joommf/tcl_proc_eval.py
|
fangohr/oommf-python
|
9c9f617c4efe4b488f01703186c1126070ea5d3f
|
[
"BSD-2-Clause"
] | 7
|
2016-01-25T09:36:46.000Z
|
2021-09-03T01:42:19.000Z
|
joommf/tcl_proc_eval.py
|
fangohr/oommf-python
|
9c9f617c4efe4b488f01703186c1126070ea5d3f
|
[
"BSD-2-Clause"
] | 1
|
2016-03-07T17:11:44.000Z
|
2016-03-07T17:11:44.000Z
|
joommf/tcl_proc_eval.py
|
fangohr/oommf-python
|
9c9f617c4efe4b488f01703186c1126070ea5d3f
|
[
"BSD-2-Clause"
] | 9
|
2015-09-30T10:53:06.000Z
|
2021-05-12T20:21:52.000Z
|
def evaluate_tcl_proc(function, functionname, x, y, z):
test.tk.eval(function)
return [float(x) for x in test.tk.eval("{} {} {} {}".format(functionname, x, y, z)).split()]
| 45
| 96
| 0.638889
| 28
| 180
| 4.035714
| 0.642857
| 0.230089
| 0.247788
| 0.265487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 180
| 3
| 97
| 60
| 0.738562
| 0
| 0
| 0
| 0
| 0
| 0.061111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
4ef8febdde7fba3253a620b9f6e5e9ce2ac43d97
| 89
|
py
|
Python
|
Redis/__init__.py
|
lkean9/GrabTaxi
|
93d916dff777ac69ad83973fa00704ed2ae110ee
|
[
"MIT"
] | null | null | null |
Redis/__init__.py
|
lkean9/GrabTaxi
|
93d916dff777ac69ad83973fa00704ed2ae110ee
|
[
"MIT"
] | null | null | null |
Redis/__init__.py
|
lkean9/GrabTaxi
|
93d916dff777ac69ad83973fa00704ed2ae110ee
|
[
"MIT"
] | null | null | null |
import redis
from Redis.redis_helper import Redis_helper
redis_helper = Redis_helper()
| 14.833333
| 43
| 0.831461
| 13
| 89
| 5.384615
| 0.307692
| 0.628571
| 0.457143
| 0.628571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123596
| 89
| 5
| 44
| 17.8
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f64a4d7d24d35854c06b00b1bbecc520ec43cf67
| 361
|
py
|
Python
|
pava/implementation/natives/sun/java2d/loops/DrawRect.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | 4
|
2017-03-30T16:51:16.000Z
|
2020-10-05T12:25:47.000Z
|
pava/implementation/natives/sun/java2d/loops/DrawRect.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
pava/implementation/natives/sun/java2d/loops/DrawRect.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
def add_native_methods(clazz):
def DrawRect__sun_java2d_SunGraphics2D__sun_java2d_SurfaceData__int__int__int__int__(a0, a1, a2, a3, a4, a5, a6):
raise NotImplementedError()
clazz.DrawRect__sun_java2d_SunGraphics2D__sun_java2d_SurfaceData__int__int__int__int__ = DrawRect__sun_java2d_SunGraphics2D__sun_java2d_SurfaceData__int__int__int__int__
| 51.571429
| 173
| 0.864266
| 49
| 361
| 5.22449
| 0.408163
| 0.210938
| 0.210938
| 0.351563
| 0.726563
| 0.726563
| 0.726563
| 0.726563
| 0.726563
| 0.726563
| 0
| 0.04878
| 0.091413
| 361
| 6
| 174
| 60.166667
| 0.731707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
211d0308e4c38af9ca89536d85f7eab39ff02fbb
| 31,908
|
py
|
Python
|
Segmentation/pruning.py
|
eyov7/CV_LTH_Pre-training-LLNL
|
bb18ba2093328aeb4e5ab3929f2749264ef3c981
|
[
"MIT"
] | 47
|
2020-12-15T03:40:50.000Z
|
2022-03-30T03:38:29.000Z
|
Segmentation/pruning.py
|
eyov7/CV_LTH_Pre-training-LLNL
|
bb18ba2093328aeb4e5ab3929f2749264ef3c981
|
[
"MIT"
] | null | null | null |
Segmentation/pruning.py
|
eyov7/CV_LTH_Pre-training-LLNL
|
bb18ba2093328aeb4e5ab3929f2749264ef3c981
|
[
"MIT"
] | 10
|
2021-03-17T01:28:57.000Z
|
2022-02-24T20:23:57.000Z
|
import pdb
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.prune as prune
def remove_model(model):
parameters_to_prune =[]
for m in model.modules():
if isinstance(m, nn.Conv2d):
parameters_to_prune.append((m,'weight'))
for module_pair in parameters_to_prune:
prune.remove(module_pair[0], module_pair[1])
def prunning_and_rewind_module(model, sim_ckpt, px):
print("INFO: Rruning Percent: [{}]".format(px))
pruning_model(model, px)
prun_model_dict = model.module.state_dict() # 375
ori_num = len(prun_model_dict.keys())
unpruned_state_dict = {k : v for k , v in sim_ckpt.items() if k in prun_model_dict.keys()}
pruned_state_dict = {k + '_orig': v for k , v in sim_ckpt.items() if k + '_orig' in prun_model_dict.keys()}
new_num = len(unpruned_state_dict.keys()) + len(pruned_state_dict.keys())
prun_model_dict.update(unpruned_state_dict)
prun_model_dict.update(pruned_state_dict)
print("INFO: Reload...[{}/{}]".format(new_num, ori_num))
model.module.load_state_dict(prun_model_dict)
see_zero_rate(model)
def prunning_and_rewind(model, epoch0_ckpt, px):
print("INFO: Pruning Percent: [{}]".format(px))
pruning_model(model, px)
prun_model_dict = model.state_dict() # 375
ori_num = len(prun_model_dict.keys())
unpruned_state_dict = {k : v for k , v in epoch0_ckpt.items() if k in prun_model_dict.keys()}
pruned_state_dict = {k + '_orig': v for k , v in epoch0_ckpt.items() if k + '_orig' in prun_model_dict.keys()}
new_num = len(unpruned_state_dict.keys()) + len(pruned_state_dict.keys())
prun_model_dict.update(unpruned_state_dict)
prun_model_dict.update(pruned_state_dict)
print("INFO: Reload...[{}/{}]".format(new_num, ori_num))
model.load_state_dict(prun_model_dict)
see_zero_rate(model)
def rewind_model(model, epoch0_ckpt):
prun_model_dict = model.state_dict() # 375
ori_num = len(prun_model_dict.keys())
unpruned_state_dict = {k : v for k , v in epoch0_ckpt.items() if k in prun_model_dict.keys()}
pruned_state_dict = {k + '_orig': v for k , v in epoch0_ckpt.items() if k + '_orig' in prun_model_dict.keys()}
new_num = len(unpruned_state_dict.keys()) + len(pruned_state_dict.keys())
prun_model_dict.update(unpruned_state_dict)
prun_model_dict.update(pruned_state_dict)
print("INFO: Rewind...[{}/{}]".format(new_num, ori_num))
model.load_state_dict(prun_model_dict)
see_zero_rate(model)
def pruning_model(model, px, exclude_first=False):
parameters_to_prune =[]
for m in model.modules():
if isinstance(m, nn.Conv2d):
parameters_to_prune.append((m,'weight'))
if exclude_first:
parameters_to_prune = parameters_to_prune[1:]
print("Exclude first conv")
parameters_to_prune = tuple(parameters_to_prune)
prune.global_unstructured(
parameters_to_prune,
pruning_method=prune.L1Unstructured,
amount=px,
)
def see_zero_rate(model):
sum_list = 0
zero_sum = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
sum_list = sum_list + float(m.weight.nelement())
zero_sum = zero_sum + float(torch.sum(m.weight == 0))
print('INFO: Remain Weight [{:.4f}%] '.format(100 * (1 - zero_sum / sum_list)))
def simclr_pruning_model_custom_res50v1(model, mask_dict, no_conv1=True):
module_to_prune = []
mask_to_prune = []
if no_conv1 == False:
module_to_prune.append(model.conv1)
mask_to_prune.append(mask_dict['conv1.weight_mask'])
#layer1
module_to_prune.append(model.layer1[0].conv1)
mask_to_prune.append(mask_dict['layer1.0.conv1.weight_mask'])
module_to_prune.append(model.layer1[0].conv2)
mask_to_prune.append(mask_dict['layer1.0.conv2.weight_mask'])
module_to_prune.append(model.layer1[0].conv3)
mask_to_prune.append(mask_dict['layer1.0.conv3.weight_mask'])
module_to_prune.append(model.layer1[0].downsample[0])
mask_to_prune.append(mask_dict['layer1.0.downsample.0.weight_mask'])
module_to_prune.append(model.layer1[1].conv1)
mask_to_prune.append(mask_dict['layer1.1.conv1.weight_mask'])
module_to_prune.append(model.layer1[1].conv2)
mask_to_prune.append(mask_dict['layer1.1.conv2.weight_mask'])
module_to_prune.append(model.layer1[1].conv3)
mask_to_prune.append(mask_dict['layer1.1.conv3.weight_mask'])
module_to_prune.append(model.layer1[2].conv1)
mask_to_prune.append(mask_dict['layer1.2.conv1.weight_mask'])
module_to_prune.append(model.layer1[2].conv2)
mask_to_prune.append(mask_dict['layer1.2.conv2.weight_mask'])
module_to_prune.append(model.layer1[2].conv3)
mask_to_prune.append(mask_dict['layer1.2.conv3.weight_mask'])
#layer2
module_to_prune.append(model.layer2[0].conv1)
mask_to_prune.append(mask_dict['layer2.0.conv1.weight_mask'])
module_to_prune.append(model.layer2[0].conv2)
mask_to_prune.append(mask_dict['layer2.0.conv2.weight_mask'])
module_to_prune.append(model.layer2[0].conv3)
mask_to_prune.append(mask_dict['layer2.0.conv3.weight_mask'])
module_to_prune.append(model.layer2[0].downsample[0])
mask_to_prune.append(mask_dict['layer2.0.downsample.0.weight_mask'])
module_to_prune.append(model.layer2[1].conv1)
mask_to_prune.append(mask_dict['layer2.1.conv1.weight_mask'])
module_to_prune.append(model.layer2[1].conv2)
mask_to_prune.append(mask_dict['layer2.1.conv2.weight_mask'])
module_to_prune.append(model.layer2[1].conv3)
mask_to_prune.append(mask_dict['layer2.1.conv3.weight_mask'])
module_to_prune.append(model.layer2[2].conv1)
mask_to_prune.append(mask_dict['layer2.2.conv1.weight_mask'])
module_to_prune.append(model.layer2[2].conv2)
mask_to_prune.append(mask_dict['layer2.2.conv2.weight_mask'])
module_to_prune.append(model.layer2[2].conv3)
mask_to_prune.append(mask_dict['layer2.2.conv3.weight_mask'])
module_to_prune.append(model.layer2[3].conv1)
mask_to_prune.append(mask_dict['layer2.3.conv1.weight_mask'])
module_to_prune.append(model.layer2[3].conv2)
mask_to_prune.append(mask_dict['layer2.3.conv2.weight_mask'])
module_to_prune.append(model.layer2[3].conv3)
mask_to_prune.append(mask_dict['layer2.3.conv3.weight_mask'])
#layer3
module_to_prune.append(model.layer3[0].conv1)
mask_to_prune.append(mask_dict['layer3.0.conv1.weight_mask'])
module_to_prune.append(model.layer3[0].conv2)
mask_to_prune.append(mask_dict['layer3.0.conv2.weight_mask'])
module_to_prune.append(model.layer3[0].conv3)
mask_to_prune.append(mask_dict['layer3.0.conv3.weight_mask'])
module_to_prune.append(model.layer3[0].downsample[0])
mask_to_prune.append(mask_dict['layer3.0.downsample.0.weight_mask'])
module_to_prune.append(model.layer3[1].conv1)
mask_to_prune.append(mask_dict['layer3.1.conv1.weight_mask'])
module_to_prune.append(model.layer3[1].conv2)
mask_to_prune.append(mask_dict['layer3.1.conv2.weight_mask'])
module_to_prune.append(model.layer3[1].conv3)
mask_to_prune.append(mask_dict['layer3.1.conv3.weight_mask'])
module_to_prune.append(model.layer3[2].conv1)
mask_to_prune.append(mask_dict['layer3.2.conv1.weight_mask'])
module_to_prune.append(model.layer3[2].conv2)
mask_to_prune.append(mask_dict['layer3.2.conv2.weight_mask'])
module_to_prune.append(model.layer3[2].conv3)
mask_to_prune.append(mask_dict['layer3.2.conv3.weight_mask'])
module_to_prune.append(model.layer3[3].conv1)
mask_to_prune.append(mask_dict['layer3.3.conv1.weight_mask'])
module_to_prune.append(model.layer3[3].conv2)
mask_to_prune.append(mask_dict['layer3.3.conv2.weight_mask'])
module_to_prune.append(model.layer3[3].conv3)
mask_to_prune.append(mask_dict['layer3.3.conv3.weight_mask'])
module_to_prune.append(model.layer3[4].conv1)
mask_to_prune.append(mask_dict['layer3.4.conv1.weight_mask'])
module_to_prune.append(model.layer3[4].conv2)
mask_to_prune.append(mask_dict['layer3.4.conv2.weight_mask'])
module_to_prune.append(model.layer3[4].conv3)
mask_to_prune.append(mask_dict['layer3.4.conv3.weight_mask'])
module_to_prune.append(model.layer3[5].conv1)
mask_to_prune.append(mask_dict['layer3.5.conv1.weight_mask'])
module_to_prune.append(model.layer3[5].conv2)
mask_to_prune.append(mask_dict['layer3.5.conv2.weight_mask'])
module_to_prune.append(model.layer3[5].conv3)
mask_to_prune.append(mask_dict['layer3.5.conv3.weight_mask'])
#layer4
module_to_prune.append(model.layer4[0].conv1)
mask_to_prune.append(mask_dict['layer4.0.conv1.weight_mask'])
module_to_prune.append(model.layer4[0].conv2)
mask_to_prune.append(mask_dict['layer4.0.conv2.weight_mask'])
module_to_prune.append(model.layer4[0].conv3)
mask_to_prune.append(mask_dict['layer4.0.conv3.weight_mask'])
module_to_prune.append(model.layer4[0].downsample[0])
mask_to_prune.append(mask_dict['layer4.0.downsample.0.weight_mask'])
module_to_prune.append(model.layer4[1].conv1)
mask_to_prune.append(mask_dict['layer4.1.conv1.weight_mask'])
module_to_prune.append(model.layer4[1].conv2)
mask_to_prune.append(mask_dict['layer4.1.conv2.weight_mask'])
module_to_prune.append(model.layer4[1].conv3)
mask_to_prune.append(mask_dict['layer4.1.conv3.weight_mask'])
module_to_prune.append(model.layer4[2].conv1)
mask_to_prune.append(mask_dict['layer4.2.conv1.weight_mask'])
module_to_prune.append(model.layer4[2].conv2)
mask_to_prune.append(mask_dict['layer4.2.conv2.weight_mask'])
module_to_prune.append(model.layer4[2].conv3)
mask_to_prune.append(mask_dict['layer4.2.conv3.weight_mask'])
for ii in range(len(module_to_prune)):
prune.CustomFromMask.apply(module_to_prune[ii], 'weight', mask=mask_to_prune[ii])
def simclr_pruning_module_model_custom_res50v1(model, mask_dict, no_conv1=True):
    """Apply ``prune.CustomFromMask`` to every conv of a ResNet-50 (v1).

    Mask keys use the DataParallel-style ``module.`` prefix, e.g.
    ``module.layer1.0.conv1.weight_mask``.

    Args:
        model: ResNet-50 v1 model exposing ``conv1`` and ``layer1``..``layer4``.
        mask_dict: mapping ``'module.<param>.weight_mask' -> mask tensor``.
        no_conv1: when True (default) the stem ``conv1`` is left unpruned.
    """
    module_to_prune = []
    mask_to_prune = []
    if not no_conv1:
        module_to_prune.append(model.conv1)
        mask_to_prune.append(mask_dict['module.conv1.weight_mask'])
    # ResNet-50 bottleneck counts per stage (layer1..layer4).  Block 0 of each
    # stage additionally carries a 1x1 downsample conv on the shortcut path.
    for stage, num_blocks in ((1, 3), (2, 4), (3, 6), (4, 3)):
        layer = getattr(model, 'layer%d' % stage)
        for idx in range(num_blocks):
            block = layer[idx]
            for conv in ('conv1', 'conv2', 'conv3'):
                module_to_prune.append(getattr(block, conv))
                mask_to_prune.append(
                    mask_dict['module.layer%d.%d.%s.weight_mask' % (stage, idx, conv)])
            if idx == 0:
                module_to_prune.append(block.downsample[0])
                mask_to_prune.append(
                    mask_dict['module.layer%d.0.downsample.0.weight_mask' % stage])
    for module, mask in zip(module_to_prune, mask_to_prune):
        prune.CustomFromMask.apply(module, 'weight', mask=mask)
def moco_pruning_model_custom_res50v1(model, mask_dict, no_conv1=True):
    """Apply ``prune.CustomFromMask`` to every conv of a ResNet-50 (v1).

    Mask keys are unprefixed, e.g. ``layer1.0.conv1.weight_mask``.
    The leftover debug dump of every mask key to stdout has been removed.

    Args:
        model: ResNet-50 v1 model exposing ``conv1`` and ``layer1``..``layer4``.
        mask_dict: mapping ``'<param>.weight_mask' -> mask tensor``.
        no_conv1: when True (default) the stem ``conv1`` is left unpruned.
    """
    module_to_prune = []
    mask_to_prune = []
    if not no_conv1:
        module_to_prune.append(model.conv1)
        mask_to_prune.append(mask_dict['conv1.weight_mask'])
    # ResNet-50 bottleneck counts per stage (layer1..layer4).  Block 0 of each
    # stage additionally carries a 1x1 downsample conv on the shortcut path.
    for stage, num_blocks in ((1, 3), (2, 4), (3, 6), (4, 3)):
        layer = getattr(model, 'layer%d' % stage)
        for idx in range(num_blocks):
            block = layer[idx]
            for conv in ('conv1', 'conv2', 'conv3'):
                module_to_prune.append(getattr(block, conv))
                mask_to_prune.append(
                    mask_dict['layer%d.%d.%s.weight_mask' % (stage, idx, conv)])
            if idx == 0:
                module_to_prune.append(block.downsample[0])
                mask_to_prune.append(
                    mask_dict['layer%d.0.downsample.0.weight_mask' % stage])
    for module, mask in zip(module_to_prune, mask_to_prune):
        prune.CustomFromMask.apply(module, 'weight', mask=mask)
def imagenet_pruning_model_custom_res50v1(model, mask_dict, no_conv1=True):
    """Apply ``prune.CustomFromMask`` to every conv of a ResNet-50 (v1).

    Mask keys are unprefixed, e.g. ``layer1.0.conv1.weight_mask``.

    Args:
        model: ResNet-50 v1 model exposing ``conv1`` and ``layer1``..``layer4``.
        mask_dict: mapping ``'<param>.weight_mask' -> mask tensor``.
        no_conv1: when True (default) the stem ``conv1`` is left unpruned.
    """
    module_to_prune = []
    mask_to_prune = []
    if not no_conv1:
        module_to_prune.append(model.conv1)
        mask_to_prune.append(mask_dict['conv1.weight_mask'])
    # ResNet-50 bottleneck counts per stage (layer1..layer4).  Block 0 of each
    # stage additionally carries a 1x1 downsample conv on the shortcut path.
    for stage, num_blocks in ((1, 3), (2, 4), (3, 6), (4, 3)):
        layer = getattr(model, 'layer%d' % stage)
        for idx in range(num_blocks):
            block = layer[idx]
            for conv in ('conv1', 'conv2', 'conv3'):
                module_to_prune.append(getattr(block, conv))
                mask_to_prune.append(
                    mask_dict['layer%d.%d.%s.weight_mask' % (stage, idx, conv)])
            if idx == 0:
                module_to_prune.append(block.downsample[0])
                mask_to_prune.append(
                    mask_dict['layer%d.0.downsample.0.weight_mask' % stage])
    for module, mask in zip(module_to_prune, mask_to_prune):
        prune.CustomFromMask.apply(module, 'weight', mask=mask)
def convert_moduledict_to_dict(module_dict):
    """Strip the DataParallel ``module.`` prefix from every state-dict key.

    The original blindly sliced ``key[7:]``, which corrupted any key that
    did not actually start with ``module.``; now the prefix is removed
    only when present.

    Args:
        module_dict: state dict whose keys may carry a ``module.`` prefix.

    Returns:
        New dict with the same values under unprefixed keys.
    """
    prefix = 'module.'
    new_dict = {}
    for key, value in module_dict.items():
        new_key = key[len(prefix):] if key.startswith(prefix) else key
        new_dict[new_key] = value
    return new_dict
def extract_mask(model_dict):
    """Return only the pruning-mask entries of a state dict.

    Keeps every key whose name contains the substring ``'mask'``.
    """
    return {key: value for key, value in model_dict.items() if 'mask' in key}
def add_orig_to_weight(model_dict):
    """Rename prunable conv weights of a ResNet-18 state dict to ``*_orig``.

    Keys in the fixed prunable set get an ``_orig`` suffix (matching the
    parameter names torch pruning creates); keys containing ``'fc'`` are
    dropped entirely; everything else passes through unchanged.
    """
    # Conv weights that pruning renames for a ResNet-18 layout.
    prunable = frozenset((
        'conv1.weight',
        'layer1.0.conv1.weight', 'layer1.0.conv2.weight',
        'layer1.1.conv1.weight', 'layer1.1.conv2.weight',
        'layer2.0.conv1.weight', 'layer2.0.conv2.weight',
        'layer2.1.conv1.weight', 'layer2.1.conv2.weight',
        'layer2.0.downsample.0.weight',
        'layer3.0.conv1.weight', 'layer3.0.conv2.weight',
        'layer3.1.conv1.weight', 'layer3.1.conv2.weight',
        'layer3.0.downsample.0.weight',
        'layer4.0.conv1.weight', 'layer4.0.conv2.weight',
        'layer4.1.conv1.weight', 'layer4.1.conv2.weight',
        'layer4.0.downsample.0.weight',
    ))
    new_dict = {}
    for key, value in model_dict.items():
        if 'fc' in key:
            continue  # classifier head is intentionally excluded
        new_dict[key + '_orig' if key in prunable else key] = value
    return new_dict
| 46.51312
| 114
| 0.746521
| 5,088
| 31,908
| 4.370086
| 0.022602
| 0.149854
| 0.26076
| 0.177378
| 0.952192
| 0.948055
| 0.945536
| 0.919991
| 0.91347
| 0.896335
| 0
| 0.048743
| 0.112072
| 31,908
| 685
| 115
| 46.581022
| 0.736058
| 0.004231
| 0
| 0.770723
| 0
| 0
| 0.209348
| 0.199301
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022928
| false
| 0
| 0.012346
| 0
| 0.040564
| 0.015873
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
2133869ee093a8e13d1c62badb62e8f58e4c4ea5
| 182
|
py
|
Python
|
codewars/8kyu/doha22/kata8/hello_world/test.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | null | null | null |
codewars/8kyu/doha22/kata8/hello_world/test.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | 2
|
2019-01-22T10:53:42.000Z
|
2019-01-31T08:02:48.000Z
|
codewars/8kyu/doha22/kata8/hello_world/test.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | 13
|
2019-01-22T10:37:42.000Z
|
2019-01-25T13:30:43.000Z
|
import unittest
from hello_world import greet
def test_getVolumeOfCubiod(benchmark):
    """Benchmark greet() and verify its return value.

    NOTE(review): the name looks copied from another kata -- it actually
    exercises ``greet``.  The pytest-benchmark ``benchmark`` fixture may
    only be invoked once per test; the original duplicated call raised
    ``FixtureAlreadyUsed``, so the redundant second assertion is removed.
    """
    assert benchmark(greet) == "hello world!"
| 26
| 47
| 0.736264
| 21
| 182
| 6.285714
| 0.52381
| 0.227273
| 0.30303
| 0.378788
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159341
| 182
| 6
| 48
| 30.333333
| 0.862745
| 0
| 0
| 0.4
| 0
| 0
| 0.131868
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2155fcf4987dc7f667ab132ac7c09e8e7f5349c2
| 127
|
py
|
Python
|
githubactioncontexthelper/__init__.py
|
mlspec/githubactioncontexthelper
|
d3d3535e204c47875a058a64b3c7fe7d70dc0479
|
[
"Apache-2.0"
] | 1
|
2021-01-25T00:40:25.000Z
|
2021-01-25T00:40:25.000Z
|
githubactioncontexthelper/__init__.py
|
mlspec/githubactioncontexthelper
|
d3d3535e204c47875a058a64b3c7fe7d70dc0479
|
[
"Apache-2.0"
] | null | null | null |
githubactioncontexthelper/__init__.py
|
mlspec/githubactioncontexthelper
|
d3d3535e204c47875a058a64b3c7fe7d70dc0479
|
[
"Apache-2.0"
] | null | null | null |
from githubactioncontexthelper.githubactioncontext import Context
from githubactioncontexthelper.__version__ import __version__
| 63.5
| 65
| 0.929134
| 10
| 127
| 11
| 0.6
| 0.527273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055118
| 127
| 2
| 66
| 63.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
dcc5d20168ead8ca3819af831b2165038ead738a
| 2,292
|
py
|
Python
|
Proyecto_Global_Hitss_RF_v1/Funciones/Ingresar_colaborador.py
|
marlonsale08/Marlon
|
07570fb4aefd2427564e77c45a15e36e3fca3b19
|
[
"MIT"
] | null | null | null |
Proyecto_Global_Hitss_RF_v1/Funciones/Ingresar_colaborador.py
|
marlonsale08/Marlon
|
07570fb4aefd2427564e77c45a15e36e3fca3b19
|
[
"MIT"
] | null | null | null |
Proyecto_Global_Hitss_RF_v1/Funciones/Ingresar_colaborador.py
|
marlonsale08/Marlon
|
07570fb4aefd2427564e77c45a15e36e3fca3b19
|
[
"MIT"
] | null | null | null |
'''Funcion guarda 40 fotografias de un nuevo ingreso en Hitss'''
import cv2
import time
import os
from PIL import Image,ImageDraw
def tomador_fotos_cerca(cam=None,Id=None):
    """Capture close-range training photos for a new employee.

    Continuously grabs frames from ``cam`` and saves them as
    ``foto0.jpg``..``foto19.jpg`` under ``ClasificadorKNN/train/<Id>/``
    (the index rolls over, so files are overwritten until 'q' is pressed).
    A centred blue guide rectangle is drawn on the preview window.

    Args:
        cam: an open ``cv2.VideoCapture`` -- TODO confirm caller always
            passes one; the ``None`` default would crash on first use.
        Id: folder name (employee id) for the saved photos.
    """
    i=1
    dest="ClasificadorKNN/train/"+Id+"/"
    switch=True
    cam.set(3,1920)   # property 3 = frame width
    cam.set(4,1080)   # property 4 = frame height
    while switch:
        # 'q' stops the capture loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            switch = False
            #.release()
            break
        ret, foto=cam.read()
        foto_g=foto
        # NOTE(review): indices cycle 0..19, so every frame overwrites one of
        # 20 rolling files -- presumably intentional; confirm with caller.
        if i==20:
            i=0
        cv2.imwrite(dest + "foto%i.jpg" % i, foto_g)
        color=(255,0,0)     # blue in BGR
        parametro=60        # half-size of the guide rectangle, in pixels
        ancho=cam.get(4)    # frame height
        ancho=int(ancho)
        largo=cam.get(3)    # frame width
        largo=int(largo)
        # Centred square of side 2*parametro.
        top = (ancho)//2-parametro
        right =(largo)//2-parametro
        bottom = (ancho)//2+parametro
        left = (largo)//2+parametro
        #draw=ImageDraw.Draw(foto)
        #draw.rectangle(((left,top),(rigth,bottom)),outline=(0,0,255))
        cv2.rectangle(foto, (left, top), (right, bottom), color, 3)
        cv2.imshow("Video",foto_g)
        i=i+1
'''cam=cv2.VideoCapture(0)
cam.set(10,100)
nombre="Marlon"
tomador_fotos(cam,nombre)
cv2.destroyAllWindows()'''
def tomador_fotos_lejos(cam=None,Id=None):
    """Capture far-range training photos for a new employee.

    Same loop as ``tomador_fotos_cerca`` but saves frames as
    ``foto20.jpg``..``foto39.jpg`` under ``ClasificadorKNN/train/<Id>/``
    (the index rolls over until 'q' is pressed).

    Args:
        cam: an open ``cv2.VideoCapture`` -- TODO confirm caller always
            passes one; the ``None`` default would crash on first use.
        Id: folder name (employee id) for the saved photos.
    """
    i=20
    dest="ClasificadorKNN/train/"+Id+"/"
    print(type(Id))
    print(dest)
    switch=True
    cam.set(3,1920)   # property 3 = frame width
    cam.set(4,1080)   # property 4 = frame height
    while switch:
        # 'q' stops the capture loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            switch = False
            #.release()
            break
        # NOTE(review): indices cycle 20..39, complementing the close-range set.
        if i==40:
            i=20
        ret, foto=cam.read()
        foto_g=foto
        cv2.imwrite(dest + "foto%i.jpg" % i, foto_g)
        color=(255,0,0)     # blue in BGR
        parametro=60        # half-size of the guide rectangle, in pixels
        ancho=cam.get(4)    # frame height
        ancho=int(ancho)
        largo=cam.get(3)    # frame width
        largo=int(largo)
        # Centred square of side 2*parametro.
        top = (ancho)//2-parametro
        right =(largo)//2-parametro
        bottom = (ancho)//2+parametro
        left = (largo)//2+parametro
        #draw=ImageDraw.Draw(foto)
        #draw.rectangle(((left,top),(rigth,bottom)),outline=(0,0,255))
        cv2.rectangle(foto, (left, top), (right, bottom), color, 3)
        cv2.imshow("Video",foto_g)
        i=i+1
        #print(ancho+largo)
#print(ancho+largo)
'''cam=cv2.VideoCapture(0)
cam.set(10,100)
nombre="Marlon"
tomador_fotos(cam,nombre)
cv2.destroyAllWindows()'''
| 24.126316
| 70
| 0.558464
| 311
| 2,292
| 4.07717
| 0.263666
| 0.063091
| 0.047319
| 0.020505
| 0.804416
| 0.782334
| 0.782334
| 0.746057
| 0.746057
| 0.746057
| 0
| 0.061324
| 0.281414
| 2,292
| 94
| 71
| 24.382979
| 0.708561
| 0.117365
| 0
| 0.806452
| 0
| 0
| 0.0436
| 0.024595
| 0
| 0
| 0.004472
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.064516
| 0
| 0.096774
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0d179a6823960a9272a7f95d1f954528a689f1f1
| 145
|
py
|
Python
|
generated-libraries/python/netapp/fpolicy/engine_name.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/fpolicy/engine_name.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/fpolicy/engine_name.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
class EngineName(basestring):
    """
    Engine name

    Thin string subtype used by the generated NetApp ONTAP bindings.
    NOTE(review): `basestring` exists only in Python 2; this generated
    module will raise NameError on Python 3 unless a shim defines it.
    """
    @staticmethod
    def get_api_name():
        # Wire name of this field in the ONTAP API.
        return "engine-name"
| 14.5
| 30
| 0.537931
| 13
| 145
| 5.846154
| 0.769231
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.351724
| 145
| 9
| 31
| 16.111111
| 0.808511
| 0.075862
| 0
| 0
| 0
| 0
| 0.09322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
0d441f5c71ab3af2aebf815a4863b05c4c04dee9
| 1,561
|
py
|
Python
|
tests/data/require_descriptive_names.py
|
hfz1337/algorithms-keeper
|
f87e92a8f13b437030e83ee4f9c7497a7b2cfbce
|
[
"MIT"
] | null | null | null |
tests/data/require_descriptive_names.py
|
hfz1337/algorithms-keeper
|
f87e92a8f13b437030e83ee4f9c7497a7b2cfbce
|
[
"MIT"
] | null | null | null |
tests/data/require_descriptive_names.py
|
hfz1337/algorithms-keeper
|
f87e92a8f13b437030e83ee4f9c7497a7b2cfbce
|
[
"MIT"
] | null | null | null |
def all_args(a: int, b: str, c: bool) -> None:
    """All arguments require descriptive names
    >>> all_args(1, "a", True)
    None
    """
    # NOTE(review): linter-test fixture -- the short names are the behaviour
    # under test; do not rename.
    return None
def some_args(num: int, s: str, b: bool) -> None:
    """Some arguments require descriptive names
    >>> some_args(1, "a", True)
    None
    """
    # NOTE(review): fixture -- only `s` and `b` should be flagged.
    return None
def no_args(num: int, boolean: bool) -> None:
    """No arguments require descriptive names
    >>> no_args(1, True)
    None
    """
    # NOTE(review): fixture -- negative case, nothing should be flagged.
    return None
def f(a: int = 10) -> None:
    """Function and argument both require descriptive names
    >>> f()
    None
    """
    # NOTE(review): fixture -- both the function name and `a` should be flagged.
    return None
class ClassTest:
    # NOTE(review): linter-test fixture mirroring the module-level functions
    # as methods; the short argument names are intentional -- do not rename.
    def __init__(self, a: int) -> None:
        """No point in having doctest in here"""
        self.a = a
    def cls_all_args(self, a: int, b: str, c: bool) -> None:
        """All arguments require descriptive names
        >>> cls_all_args(1, "a", True)
        None
        """
        return None
    def cls_some_args(self, num: int, s: str, b: bool) -> None:
        """Some arguments require descriptive names
        >>> cls_some_args(1, "a", True)
        None
        """
        return None
    def cls_no_args(self, num: int, boolean: bool) -> None:
        """No arguments require descriptive names
        >>> cls_no_args(1, True)
        None
        """
        return None
    def c(self, a: int = 10) -> None:
        """Function and argument both require descriptive names
        >>> c()
        None
        """
        return None
class C:
    """A class which requires descriptive names"""
    # NOTE(review): fixture -- the single-letter class name is intentional.
    pass
| 21.985915
| 63
| 0.55221
| 203
| 1,561
| 4.137931
| 0.20197
| 0.171429
| 0.219048
| 0.228571
| 0.74881
| 0.738095
| 0.738095
| 0.738095
| 0.671429
| 0.519048
| 0
| 0.00939
| 0.317745
| 1,561
| 70
| 64
| 22.3
| 0.779343
| 0.410634
| 0
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.047619
| 0
| 0
| 0.904762
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
b4a09e2f1b57107e6003c75e9c4ca7561a90df22
| 56
|
py
|
Python
|
resources/neurons/list_available_orders/__init__.py
|
lya-corp/lya
|
04b32f3191072ed21f20b93397015dbfcf9e7bb3
|
[
"MIT"
] | 3
|
2020-06-19T20:08:54.000Z
|
2021-06-30T11:25:41.000Z
|
resources/neurons/list_available_orders/__init__.py
|
flolep2607/Lya
|
669072b6b80ef493591b28ecc29bebd587913af0
|
[
"MIT"
] | null | null | null |
resources/neurons/list_available_orders/__init__.py
|
flolep2607/Lya
|
669072b6b80ef493591b28ecc29bebd587913af0
|
[
"MIT"
] | 1
|
2018-04-04T16:10:22.000Z
|
2018-04-04T16:10:22.000Z
|
from list_available_orders import List_available_orders
| 28
| 55
| 0.928571
| 8
| 56
| 6
| 0.625
| 0.541667
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 56
| 1
| 56
| 56
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b4e7c0806032f62e465f7957d87dd7141c136ebf
| 2,489
|
py
|
Python
|
tests/calibration/test_charuco_point_detector.py
|
SciKit-Surgery/scikit-surgeryimage
|
a51d2ff5a612a0918ae22000239c95c472ff4edf
|
[
"BSD-3-Clause"
] | null | null | null |
tests/calibration/test_charuco_point_detector.py
|
SciKit-Surgery/scikit-surgeryimage
|
a51d2ff5a612a0918ae22000239c95c472ff4edf
|
[
"BSD-3-Clause"
] | 4
|
2022-01-12T10:18:28.000Z
|
2022-03-22T09:46:12.000Z
|
tests/calibration/test_charuco_point_detector.py
|
SciKit-Surgery/scikit-surgeryimage
|
a51d2ff5a612a0918ae22000239c95c472ff4edf
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
"""
Tests for ChArUco implementation of PointDetector.
"""
import cv2 as cv2
from cv2 import aruco
import pytest
from sksurgeryimage.calibration.charuco_point_detector import CharucoPointDetector
def test_charuco_detector():
    """Detect every corner on a clean 13x10 ChArUco test board image."""
    image = cv2.imread('tests/data/calibration/test-charuco.png')
    dictionary = cv2.aruco.Dictionary_get(aruco.DICT_4X4_250)
    detector = CharucoPointDetector(dictionary, (13, 10), (3, 2))
    ids, object_points, image_points = detector.get_points(image)
    # 108 corners found: ids (N,1), object points (N,3), image points (N,2).
    assert ids.shape[0] == 108
    assert ids.shape[1] == 1
    assert object_points.shape[0] == 108
    assert object_points.shape[1] == 3
    assert image_points.shape[0] == 108
    assert image_points.shape[1] == 2
    # Model points cover the full board regardless of the input image.
    model = detector.get_model_points()
    assert model.shape[0] == 108
def test_charuco_detector_with_masked_image():
    """Partially blanked board: only the visible 45 corners are detected."""
    image = cv2.imread('tests/data/calibration/test-charuco-blanked.png')
    dictionary = cv2.aruco.Dictionary_get(aruco.DICT_4X4_250)
    detector = CharucoPointDetector(dictionary, (13, 10), (3, 2))
    ids, object_points, image_points = detector.get_points(image)
    # 45 of 108 corners remain visible on the blanked image.
    assert ids.shape[0] == 45
    assert ids.shape[1] == 1
    assert object_points.shape[0] == 45
    assert object_points.shape[1] == 3
    assert image_points.shape[0] == 45
    assert image_points.shape[1] == 2
def test_charuco_detector_with_filtering():
    """Corrupted 19x26 board with filtering=True drops dubious corners (315)."""
    image = cv2.imread('tests/data/calibration/pattern_4x4_19x26_5_4_with_inset_13x18_corrupted2-landscape.png')
    dictionary = cv2.aruco.Dictionary_get(aruco.DICT_4X4_250)
    detector = CharucoPointDetector(dictionary, (19, 26), (5, 4), filtering=True)
    ids, object_points, image_points = detector.get_points(image)
    # With filtering, 6 fewer corners than the unfiltered run below.
    assert ids.shape[0] == 315
    assert ids.shape[1] == 1
    assert object_points.shape[0] == 315
    assert object_points.shape[1] == 3
    assert image_points.shape[0] == 315
    assert image_points.shape[1] == 2
def test_charuco_detector_without_filtering():
    """Without filtering, the corrupted board still yields 321 corners."""
    img = cv2.imread(
        'tests/data/calibration/'
        'pattern_4x4_19x26_5_4_with_inset_13x18_corrupted2-landscape.png')
    tag_dictionary = cv2.aruco.Dictionary_get(aruco.DICT_4X4_250)
    point_detector = CharucoPointDetector(tag_dictionary, (19, 26), (5, 4))
    ids, object_points, image_points = point_detector.get_points(img)
    # Six more corners than the filtered run over the same image.
    for array, width in ((ids, 1), (object_points, 3), (image_points, 2)):
        assert array.shape[0] == 321
        assert array.shape[1] == width
| 37.712121
| 112
| 0.7272
| 351
| 2,489
| 4.94302
| 0.179487
| 0.101441
| 0.064553
| 0.106052
| 0.832277
| 0.802305
| 0.77464
| 0.77464
| 0.722767
| 0.722767
| 0
| 0.073739
| 0.155484
| 2,489
| 65
| 113
| 38.292308
| 0.751665
| 0.025713
| 0
| 0.48
| 0
| 0
| 0.106744
| 0.106744
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.08
| false
| 0
| 0.08
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3700b2e25e577ec1bf285df0c4cbafe4d2873f18
| 1,882
|
py
|
Python
|
tic_tac_toe.py
|
aaghamohammadi/tic-tac-toe
|
1b1f1f2fb70b05bdc1023e6bd99666b1648bd68f
|
[
"MIT"
] | 2
|
2018-05-15T04:50:05.000Z
|
2020-05-06T07:59:41.000Z
|
tic_tac_toe.py
|
aaghamohammadi/tic-tac-toe
|
1b1f1f2fb70b05bdc1023e6bd99666b1648bd68f
|
[
"MIT"
] | null | null | null |
tic_tac_toe.py
|
aaghamohammadi/tic-tac-toe
|
1b1f1f2fb70b05bdc1023e6bd99666b1648bd68f
|
[
"MIT"
] | 1
|
2020-05-06T07:59:42.000Z
|
2020-05-06T07:59:42.000Z
|
from pprint import pprint
# 3x3 game board shared by all functions below; '-' marks an empty cell.
board_game = [['-'] * 3 for i in range(3)]
# Module-level "someone has won" flag (shadowed by a local in start_game).
is_finish = False
def show_board_game():
    """Print the 3x3 board, one row per line."""
    for board_row in board_game:
        pprint(board_row)
def turn_X():
    """Prompt player X for a 'row,col' cell until an empty one is given, then mark it.

    Reads from stdin ('row,col', zero-based) and mutates the shared board.
    """
    first_pass = True
    row = 0
    col = 0
    # do-while: keep prompting while the chosen cell is occupied.
    # BUG FIX: the original condition tested `== '-'`, which is inverted —
    # it re-prompted on valid (empty) cells and let the final move land on
    # an occupied square, overwriting the opponent's mark.
    while first_pass or board_game[row][col] != '-':
        first_pass = False
        num = input('Player X: ')
        row, col = num.split(',')
        row = int(row)
        col = int(col)
    board_game[row][col] = 'X'
    show_board_game()
def turn_O():
    """Prompt player O for a 'row,col' cell until an empty one is given, then mark it.

    Reads from stdin ('row,col', zero-based) and mutates the shared board.
    """
    first_pass = True
    row = 0
    col = 0
    # do-while: keep prompting while the chosen cell is occupied.
    # BUG FIX: the original condition tested `== '-'` (inverted), accepting
    # occupied cells and rejecting empty ones — same defect as turn_X.
    while first_pass or board_game[row][col] != '-':
        first_pass = False
        num = input('Player O: ')
        row, col = num.split(',')
        row = int(row)
        col = int(col)
    board_game[row][col] = 'O'
    show_board_game()
def check(player, board=None):
    """Return True if *player* ('X' or 'O') owns a full row, column, or diagonal.

    :param player: the mark to look for.
    :param board: optional 3x3 list of lists; defaults to the shared
        module-level board_game (backward compatible with check(player)).
    :return: True on a win, False otherwise (the original implicitly
        returned None and also skipped two winning lines — see below).
    """
    if board is None:
        board = board_game
    # BUG FIX: the original chain tested only 6 of the 8 winning lines —
    # the middle row (board[1][*]) and middle column (board[*][1]) were
    # never checked, so a win through the centre edge cells was missed.
    lines = [
        [board[0][0], board[0][1], board[0][2]],  # top row
        [board[1][0], board[1][1], board[1][2]],  # middle row (was missing)
        [board[2][0], board[2][1], board[2][2]],  # bottom row
        [board[0][0], board[1][0], board[2][0]],  # left column
        [board[0][1], board[1][1], board[2][1]],  # middle column (was missing)
        [board[0][2], board[1][2], board[2][2]],  # right column
        [board[0][0], board[1][1], board[2][2]],  # main diagonal
        [board[0][2], board[1][1], board[2][0]],  # anti-diagonal
    ]
    return any(all(cell == player for cell in line) for line in lines)
def start_game():
    """Run the game: players alternate turns until a win or the board fills.

    Prints the winner (or 'Draw') and returns; all state lives in the
    shared module-level board.
    """
    # BUG FIX: the original iterated range(9) running BOTH players per
    # iteration — up to 18 moves on a 9-cell board.  A full game is at
    # most 9 moves total, alternating X (even) and O (odd).
    for move in range(9):
        if move % 2 == 0:
            turn_X()
            if check('X'):
                pprint('Winner is player X')
                return
        else:
            turn_O()
            if check('O'):
                pprint('Winner is player O')
                return
    # Board full with no winning line.
    pprint('Draw')
start_game()
| 26.138889
| 98
| 0.552072
| 278
| 1,882
| 3.564748
| 0.147482
| 0.245207
| 0.169526
| 0.217962
| 0.774975
| 0.727548
| 0.706357
| 0.706357
| 0.607467
| 0.607467
| 0
| 0.033691
| 0.306057
| 1,882
| 71
| 99
| 26.507042
| 0.725115
| 0
| 0
| 0.5
| 0
| 0
| 0.034574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0.107143
| 0.017857
| 0
| 0.25
| 0.071429
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2ea6e8e634ea89f6e4d25131616eb03711996933
| 172
|
py
|
Python
|
GeometryMath.py
|
mateusfg7/Geometry-Math
|
21e0a9ad2d8a567f50223d7f9b3310f1a08d324c
|
[
"MIT"
] | 6
|
2020-01-20T13:18:39.000Z
|
2020-09-25T17:28:21.000Z
|
GeometryMath.py
|
mateusfg7/Geometry-Math
|
21e0a9ad2d8a567f50223d7f9b3310f1a08d324c
|
[
"MIT"
] | 3
|
2020-01-17T11:46:45.000Z
|
2020-05-25T13:18:20.000Z
|
GeometryMath.py
|
mateusfg7/Geometry-Math
|
21e0a9ad2d8a567f50223d7f9b3310f1a08d324c
|
[
"MIT"
] | null | null | null |
# Console entry point for the Geometry-Math app: the header banner and the
# flat-figures submenu are handed to the main menu loop, which drives all
# further interaction.
from components.header import header
from components.menu import main_menu
from components.menu.FlatFiguresMenu import flatFiguresMenu

main_menu(header, flatFiguresMenu)
| 24.571429
| 59
| 0.866279
| 21
| 172
| 7
| 0.333333
| 0.285714
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 172
| 6
| 60
| 28.666667
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2ed885135ea62cca22bf6eeaea2072dd340e0560
| 18,132
|
py
|
Python
|
sdk/python/pulumi_openstack/sharedfilesystem/share_access.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2018-09-12T12:37:51.000Z
|
2022-02-04T19:32:13.000Z
|
sdk/python/pulumi_openstack/sharedfilesystem/share_access.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 72
|
2018-08-15T13:04:57.000Z
|
2022-03-31T15:39:49.000Z
|
sdk/python/pulumi_openstack/sharedfilesystem/share_access.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-03-14T08:28:49.000Z
|
2021-12-29T04:23:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ShareAccessArgs', 'ShareAccess']
@pulumi.input_type
class ShareAccessArgs:
    """Constructor arguments for a ShareAccess resource.

    NOTE: machine-generated by the Pulumi Terraform Bridge (tfgen); each
    input is stored via pulumi.set/get and exposed through a property pair.
    """

    def __init__(__self__, *,
                 access_level: pulumi.Input[str],
                 access_to: pulumi.Input[str],
                 access_type: pulumi.Input[str],
                 share_id: pulumi.Input[str],
                 region: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ShareAccess resource.
        :param pulumi.Input[str] access_level: The access level to the share. Can either be `rw` or `ro`.
        :param pulumi.Input[str] access_to: The value that defines the access. Can either be an IP
               address or a username verified by configured Security Service of the Share Network.
        :param pulumi.Input[str] access_type: The access rule type. Can either be an ip, user,
               cert, or cephx. cephx support requires an OpenStack environment that supports
               Shared Filesystem microversion 2.13 (Mitaka) or later.
        :param pulumi.Input[str] share_id: The UUID of the share to which you are granted access.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share access. Changing this
               creates a new share access.
        """
        pulumi.set(__self__, "access_level", access_level)
        pulumi.set(__self__, "access_to", access_to)
        pulumi.set(__self__, "access_type", access_type)
        pulumi.set(__self__, "share_id", share_id)
        # region is the only optional input; store it only when provided.
        if region is not None:
            pulumi.set(__self__, "region", region)

    @property
    @pulumi.getter(name="accessLevel")
    def access_level(self) -> pulumi.Input[str]:
        """
        The access level to the share. Can either be `rw` or `ro`.
        """
        return pulumi.get(self, "access_level")

    @access_level.setter
    def access_level(self, value: pulumi.Input[str]):
        pulumi.set(self, "access_level", value)

    @property
    @pulumi.getter(name="accessTo")
    def access_to(self) -> pulumi.Input[str]:
        """
        The value that defines the access. Can either be an IP
        address or a username verified by configured Security Service of the Share Network.
        """
        return pulumi.get(self, "access_to")

    @access_to.setter
    def access_to(self, value: pulumi.Input[str]):
        pulumi.set(self, "access_to", value)

    @property
    @pulumi.getter(name="accessType")
    def access_type(self) -> pulumi.Input[str]:
        """
        The access rule type. Can either be an ip, user,
        cert, or cephx. cephx support requires an OpenStack environment that supports
        Shared Filesystem microversion 2.13 (Mitaka) or later.
        """
        return pulumi.get(self, "access_type")

    @access_type.setter
    def access_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "access_type", value)

    @property
    @pulumi.getter(name="shareId")
    def share_id(self) -> pulumi.Input[str]:
        """
        The UUID of the share to which you are granted access.
        """
        return pulumi.get(self, "share_id")

    @share_id.setter
    def share_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "share_id", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Shared File System client.
        A Shared File System client is needed to create a share access. Changing this
        creates a new share access.
        """
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
@pulumi.input_type
class _ShareAccessState:
    """State inputs for looking up / filtering existing ShareAccess resources.

    NOTE: machine-generated by the Pulumi Terraform Bridge (tfgen); unlike
    ShareAccessArgs, every field here is optional and includes the
    provider-computed access_key.
    """

    def __init__(__self__, *,
                 access_key: Optional[pulumi.Input[str]] = None,
                 access_level: Optional[pulumi.Input[str]] = None,
                 access_to: Optional[pulumi.Input[str]] = None,
                 access_type: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 share_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ShareAccess resources.
        :param pulumi.Input[str] access_key: The access credential of the entity granted access.
        :param pulumi.Input[str] access_level: The access level to the share. Can either be `rw` or `ro`.
        :param pulumi.Input[str] access_to: The value that defines the access. Can either be an IP
               address or a username verified by configured Security Service of the Share Network.
        :param pulumi.Input[str] access_type: The access rule type. Can either be an ip, user,
               cert, or cephx. cephx support requires an OpenStack environment that supports
               Shared Filesystem microversion 2.13 (Mitaka) or later.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share access. Changing this
               creates a new share access.
        :param pulumi.Input[str] share_id: The UUID of the share to which you are granted access.
        """
        # All fields optional: only store what the caller actually supplied.
        if access_key is not None:
            pulumi.set(__self__, "access_key", access_key)
        if access_level is not None:
            pulumi.set(__self__, "access_level", access_level)
        if access_to is not None:
            pulumi.set(__self__, "access_to", access_to)
        if access_type is not None:
            pulumi.set(__self__, "access_type", access_type)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if share_id is not None:
            pulumi.set(__self__, "share_id", share_id)

    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> Optional[pulumi.Input[str]]:
        """
        The access credential of the entity granted access.
        """
        return pulumi.get(self, "access_key")

    @access_key.setter
    def access_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_key", value)

    @property
    @pulumi.getter(name="accessLevel")
    def access_level(self) -> Optional[pulumi.Input[str]]:
        """
        The access level to the share. Can either be `rw` or `ro`.
        """
        return pulumi.get(self, "access_level")

    @access_level.setter
    def access_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_level", value)

    @property
    @pulumi.getter(name="accessTo")
    def access_to(self) -> Optional[pulumi.Input[str]]:
        """
        The value that defines the access. Can either be an IP
        address or a username verified by configured Security Service of the Share Network.
        """
        return pulumi.get(self, "access_to")

    @access_to.setter
    def access_to(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_to", value)

    @property
    @pulumi.getter(name="accessType")
    def access_type(self) -> Optional[pulumi.Input[str]]:
        """
        The access rule type. Can either be an ip, user,
        cert, or cephx. cephx support requires an OpenStack environment that supports
        Shared Filesystem microversion 2.13 (Mitaka) or later.
        """
        return pulumi.get(self, "access_type")

    @access_type.setter
    def access_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "access_type", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Shared File System client.
        A Shared File System client is needed to create a share access. Changing this
        creates a new share access.
        """
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)

    @property
    @pulumi.getter(name="shareId")
    def share_id(self) -> Optional[pulumi.Input[str]]:
        """
        The UUID of the share to which you are granted access.
        """
        return pulumi.get(self, "share_id")

    @share_id.setter
    def share_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "share_id", value)
class ShareAccess(pulumi.CustomResource):
    """OpenStack Shared File System share-access resource.

    NOTE: machine-generated by the Pulumi Terraform Bridge (tfgen); the two
    __init__ overloads are documentation-only stubs — the real dispatcher
    below routes to _internal_init.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_level: Optional[pulumi.Input[str]] = None,
                 access_to: Optional[pulumi.Input[str]] = None,
                 access_type: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 share_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## Import

        This resource can be imported by specifying the ID of the share and the ID of the share access, separated by a slash, e.g.

        ```sh
        $ pulumi import openstack:sharedfilesystem/shareAccess:ShareAccess share_access_1 <share id>/<share access id>
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_level: The access level to the share. Can either be `rw` or `ro`.
        :param pulumi.Input[str] access_to: The value that defines the access. Can either be an IP
               address or a username verified by configured Security Service of the Share Network.
        :param pulumi.Input[str] access_type: The access rule type. Can either be an ip, user,
               cert, or cephx. cephx support requires an OpenStack environment that supports
               Shared Filesystem microversion 2.13 (Mitaka) or later.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share access. Changing this
               creates a new share access.
        :param pulumi.Input[str] share_id: The UUID of the share to which you are granted access.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ShareAccessArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Import

        This resource can be imported by specifying the ID of the share and the ID of the share access, separated by a slash, e.g.

        ```sh
        $ pulumi import openstack:sharedfilesystem/shareAccess:ShareAccess share_access_1 <share id>/<share access id>
        ```

        :param str resource_name: The name of the resource.
        :param ShareAccessArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher: accepts either the args-object or keyword form and
        # forwards both to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ShareAccessArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_level: Optional[pulumi.Input[str]] = None,
                 access_to: Optional[pulumi.Input[str]] = None,
                 access_type: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 share_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are adopting an existing resource via get();
        # only a fresh create validates the required inputs below.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ShareAccessArgs.__new__(ShareAccessArgs)

            # Required inputs may be omitted only when looking up by URN.
            if access_level is None and not opts.urn:
                raise TypeError("Missing required property 'access_level'")
            __props__.__dict__["access_level"] = access_level
            if access_to is None and not opts.urn:
                raise TypeError("Missing required property 'access_to'")
            __props__.__dict__["access_to"] = access_to
            if access_type is None and not opts.urn:
                raise TypeError("Missing required property 'access_type'")
            __props__.__dict__["access_type"] = access_type
            __props__.__dict__["region"] = region
            if share_id is None and not opts.urn:
                raise TypeError("Missing required property 'share_id'")
            __props__.__dict__["share_id"] = share_id
            # access_key is provider-computed; always starts unset.
            __props__.__dict__["access_key"] = None
        super(ShareAccess, __self__).__init__(
            'openstack:sharedfilesystem/shareAccess:ShareAccess',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            access_key: Optional[pulumi.Input[str]] = None,
            access_level: Optional[pulumi.Input[str]] = None,
            access_to: Optional[pulumi.Input[str]] = None,
            access_type: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None,
            share_id: Optional[pulumi.Input[str]] = None) -> 'ShareAccess':
        """
        Get an existing ShareAccess resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] access_key: The access credential of the entity granted access.
        :param pulumi.Input[str] access_level: The access level to the share. Can either be `rw` or `ro`.
        :param pulumi.Input[str] access_to: The value that defines the access. Can either be an IP
               address or a username verified by configured Security Service of the Share Network.
        :param pulumi.Input[str] access_type: The access rule type. Can either be an ip, user,
               cert, or cephx. cephx support requires an OpenStack environment that supports
               Shared Filesystem microversion 2.13 (Mitaka) or later.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share access. Changing this
               creates a new share access.
        :param pulumi.Input[str] share_id: The UUID of the share to which you are granted access.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ShareAccessState.__new__(_ShareAccessState)

        __props__.__dict__["access_key"] = access_key
        __props__.__dict__["access_level"] = access_level
        __props__.__dict__["access_to"] = access_to
        __props__.__dict__["access_type"] = access_type
        __props__.__dict__["region"] = region
        __props__.__dict__["share_id"] = share_id
        return ShareAccess(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="accessKey")
    def access_key(self) -> pulumi.Output[str]:
        """
        The access credential of the entity granted access.
        """
        return pulumi.get(self, "access_key")

    @property
    @pulumi.getter(name="accessLevel")
    def access_level(self) -> pulumi.Output[str]:
        """
        The access level to the share. Can either be `rw` or `ro`.
        """
        return pulumi.get(self, "access_level")

    @property
    @pulumi.getter(name="accessTo")
    def access_to(self) -> pulumi.Output[str]:
        """
        The value that defines the access. Can either be an IP
        address or a username verified by configured Security Service of the Share Network.
        """
        return pulumi.get(self, "access_to")

    @property
    @pulumi.getter(name="accessType")
    def access_type(self) -> pulumi.Output[str]:
        """
        The access rule type. Can either be an ip, user,
        cert, or cephx. cephx support requires an OpenStack environment that supports
        Shared Filesystem microversion 2.13 (Mitaka) or later.
        """
        return pulumi.get(self, "access_type")

    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        The region in which to obtain the V2 Shared File System client.
        A Shared File System client is needed to create a share access. Changing this
        creates a new share access.
        """
        return pulumi.get(self, "region")

    @property
    @pulumi.getter(name="shareId")
    def share_id(self) -> pulumi.Output[str]:
        """
        The UUID of the share to which you are granted access.
        """
        return pulumi.get(self, "share_id")
| 43.691566
| 134
| 0.637437
| 2,294
| 18,132
| 4.849172
| 0.080645
| 0.074164
| 0.091873
| 0.073175
| 0.845829
| 0.821917
| 0.799263
| 0.761956
| 0.746224
| 0.725369
| 0
| 0.002343
| 0.270406
| 18,132
| 414
| 135
| 43.797101
| 0.838537
| 0.378778
| 0
| 0.605381
| 1
| 0
| 0.098012
| 0.00492
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156951
| false
| 0.004484
| 0.022422
| 0
| 0.273543
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2c245d814c58bba1b000156b41b77936e9890495
| 30,387
|
py
|
Python
|
test/integration/test_simple_workflows.py
|
boto/botoflow
|
49d8ed3bc9c57294504be82e933a051e1901b76e
|
[
"Apache-2.0"
] | 13
|
2016-06-15T06:10:57.000Z
|
2021-10-30T03:52:28.000Z
|
test/integration/test_simple_workflows.py
|
DalavanCloud/botoflow
|
49d8ed3bc9c57294504be82e933a051e1901b76e
|
[
"Apache-2.0"
] | 11
|
2016-09-15T01:48:08.000Z
|
2019-01-09T06:11:44.000Z
|
test/integration/test_simple_workflows.py
|
DalavanCloud/botoflow
|
49d8ed3bc9c57294504be82e933a051e1901b76e
|
[
"Apache-2.0"
] | 16
|
2016-06-05T03:42:04.000Z
|
2022-03-01T17:43:14.000Z
|
# -*- mode:python ; fill-column:120 -*-
import time
import unittest
from botoflow import (WorkflowDefinition, execute, return_, coroutine, activity, ThreadedWorkflowExecutor,
ThreadedActivityExecutor, WorkflowWorker, ActivityWorker, activity_options,
workflow_time, flow_types, workflow_starter, workflow)
from botoflow.exceptions import (ActivityTaskFailedError, WorkflowFailedError)
from utils import SWFMixIn
from various_activities import BunchOfActivities
class TestSimpleWorkflows(SWFMixIn, unittest.TestCase):
def test_no_activities(self):
    """A workflow with no activities completes and echoes its input back."""
    class NoActivitiesWorkflow(WorkflowDefinition):
        @execute(version='1.2', execution_start_to_close_timeout=60)
        def execute(self, arg1):
            return_(arg1)

    with workflow_starter(self.session, self.region, self.domain, self.task_list) as starter:
        instance = NoActivitiesWorkflow.execute(arg1="TestExecution")
        self.workflow_execution = instance.workflow_execution

    # start + stop should run the worker's Decider once
    worker = ThreadedWorkflowExecutor(WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        NoActivitiesWorkflow))
    worker.start()
    worker.stop()
    worker.join()
    time.sleep(2)  # let SWF persist the completion before polling history
    self.assertEqual("TestExecution", starter.wait_for_completion(instance, 1))

    hist = self.get_workflow_execution_history()
    # A no-activity run is expected to produce exactly 5 history events,
    # the last being the completion carrying the serialized result.
    self.assertEqual(len(hist), 5)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 'TestExecution')
def test_no_activities_failure(self):
    """An exception in the workflow body surfaces as WorkflowFailedError."""
    class NoActivitiesFailureWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1):
            raise RuntimeError("ExecutionFailed")

    worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, NoActivitiesFailureWorkflow)
    with workflow_starter(self.session, self.region, self.domain, self.task_list) as starter:
        instance = NoActivitiesFailureWorkflow.execute(arg1="TestExecution")
        self.workflow_execution = instance.workflow_execution
    worker.run_once()  # a single decision is enough to fail the workflow
    time.sleep(1)
    try:
        starter.wait_for_completion(instance, 1)
    except WorkflowFailedError as err:
        # The original exception type must round-trip through SWF.
        self.assertEqual(RuntimeError, type(err.cause))
    else:
        self.fail("Should never succeed")

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 5)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionFailed')
    # details[0] carries the original exception; its message must survive.
    self.assertEqual(str(self.serializer.loads(
        hist[-1]['workflowExecutionFailedEventAttributes']['details'])[0]), "ExecutionFailed")
def test_no_activities_with_state(self):
    """workflow_state set in the body is published as the executionContext."""
    class NoActivitiesWorkflow(WorkflowDefinition):
        @execute(version='1.2', execution_start_to_close_timeout=60)
        def execute(self, arg1):
            self.workflow_state = "Workflow Started"
            return_(arg1)

    worker = ThreadedWorkflowExecutor(WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, NoActivitiesWorkflow))
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = NoActivitiesWorkflow.execute(arg1="TestExecution")
        self.workflow_execution = instance.workflow_execution
    # start + stop should run the worker's Decider once
    worker.start()
    worker.stop()
    worker.join()
    time.sleep(2)

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 5)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    # The decision-task completion (second-to-last event) carries the state.
    self.assertEqual(
        hist[-2]['decisionTaskCompletedEventAttributes']['executionContext'],
        'Workflow Started')
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 'TestExecution')
def test_one_activity(self):
    """One activity (sum) scheduled via an instance client completes with 3."""
    class OneActivityWorkflow(WorkflowDefinition):
        def __init__(self, workflow_execution):
            super(OneActivityWorkflow, self).__init__(workflow_execution)
            self.activities_client = BunchOfActivities()

        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            arg_sum = yield self.activities_client.sum(arg1, arg2)
            return_(arg_sum)

    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, OneActivityWorkflow)
    act_worker = ThreadedActivityExecutor(ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities()))
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    # Decide (schedule activity) -> run activity -> decide (complete).
    wf_worker.run_once()
    act_worker.start(1, 4)
    act_worker.stop()
    wf_worker.run_once()
    act_worker.join()
    time.sleep(1)

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 11)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 3)
def test_one_priority_activity_and_worker(self):
    """Same as test_one_activity but with task_priority on the workflow
    and a priority activity (priority_sum)."""
    class OneActivityWorkflow(WorkflowDefinition):
        def __init__(self, workflow_execution):
            super(OneActivityWorkflow, self).__init__(workflow_execution)
            self.activities_client = BunchOfActivities()

        @execute(version='1.1', execution_start_to_close_timeout=60, task_priority=20)
        def execute(self, arg1, arg2):
            arg_sum = yield self.activities_client.priority_sum(arg1, arg2)
            return_(arg_sum)

    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, OneActivityWorkflow)
    act_worker = ThreadedActivityExecutor(ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities()))
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    # Decide (schedule activity) -> run activity -> decide (complete).
    wf_worker.run_once()
    act_worker.start(1, 4)
    act_worker.stop()
    wf_worker.run_once()
    act_worker.join()
    time.sleep(1)

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 11)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 3)
def test_one_activity_timed(self):
    """workflow_time.time() must match the decision-event timestamps in history."""
    class OneActivityTimedWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            # Capture replay-safe time before and after the activity.
            mytime = workflow_time.time()
            yield BunchOfActivities.sum(arg1, arg2)
            return_([mytime, workflow_time.time()])

    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, OneActivityTimedWorkflow)
    act_worker = ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities())
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityTimedWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    wf_worker.run_once()
    act_worker.run_once()
    wf_worker.run_once()
    time.sleep(1)

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 11)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    # The two captured times must equal the timestamps of history events
    # 2 and 8 (the decision events that replayed each time() call).
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), [
            int(time.mktime(hist[2]['eventTimestamp'].timetuple())),
            int(time.mktime(hist[8]['eventTimestamp'].timetuple()))])
def test_one_activity_dynamic(self):
    """An ActivityType built at runtime behaves like a decorated activity."""
    class OneActivityTimedWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            # create an activity call dynamically
            sum = flow_types.ActivityType('1.1', name='BunchOfActivities.sum')
            arg_sum = yield sum(arg1, arg2)
            return_(arg_sum)

    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, OneActivityTimedWorkflow)
    act_worker = ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities())
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityTimedWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    wf_worker.run_once()
    act_worker.run_once()
    wf_worker.run_once()
    time.sleep(1)

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 11)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 3)
def test_one_activity_options_overrides(self):
    """activity_options() overrides the activity's start_to_close_timeout."""
    class OneActivityWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            # Override just for this call; 66 is checked in history below.
            with activity_options(start_to_close_timeout=66):
                arg_sum = yield BunchOfActivities.sum(arg1, arg2)
            return_(arg_sum)

    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, OneActivityWorkflow)
    act_worker = ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities())
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    wf_worker.run_once()
    act_worker.run_once()
    wf_worker.run_once()
    time.sleep(1)

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 11)
    # hist[4] is the ActivityTaskScheduled event; SWF stores timeouts as strings.
    self.assertEqual(hist[4]['activityTaskScheduledEventAttributes']['startToCloseTimeout'], '66')
def test_one_activity_with_timer(self):
    """workflow_time.sleep(2) starts/fires an SWF timer before the activity."""
    class OneActivityWithTimerWorkflow(WorkflowDefinition):
        def __init__(self, workflow_execution):
            super(OneActivityWithTimerWorkflow, self).__init__(workflow_execution)
            self.activities_client = BunchOfActivities()

        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            yield workflow_time.sleep(2)
            arg_sum = yield self.activities_client.sum(arg1, arg2)
            return_(arg_sum)

    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, OneActivityWithTimerWorkflow)
    act_worker = ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities())
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityWithTimerWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    # Extra decision vs. the plain one-activity test: the timer firing
    # triggers an additional decision task before the activity runs.
    wf_worker.run_once()
    wf_worker.run_once()
    act_worker.run_once()
    wf_worker.run_once()
    time.sleep(1)

    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 16)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    # timer specific checks
    self.assertEqual(hist[4]['eventType'], 'TimerStarted')
    self.assertEqual(hist[4]['timerStartedEventAttributes']['startToFireTimeout'], '2')
    self.assertEqual(hist[5]['eventType'], 'TimerFired')
def test_one_activity_default_task_list(self):
    """An activity that declares its own task list is scheduled there,
    even when the workflow's decider polls a different list."""
    class OneActivityCustomTaskList(object):
        @activity(version='1.1', task_list='abracadabra')
        def sum(self, x, y):
            return x + y

    class OneActivityDefaultTaskListWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            arg_sum = yield OneActivityCustomTaskList.sum(arg1, arg2)
            return_(arg_sum)

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        OneActivityDefaultTaskListWorkflow)
    # The activity worker polls the activity's own task list.
    activity_runner = ThreadedActivityExecutor(ActivityWorker(
        self.session, self.region, self.domain, 'abracadabra',
        OneActivityCustomTaskList()))

    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityDefaultTaskListWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution

    decider.run_once()
    activity_runner.start(1, 4)
    activity_runner.stop()
    decider.run_once()
    activity_runner.join()

    time.sleep(1)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 11)
    self.assertEqual(
        history[4]['activityTaskScheduledEventAttributes']['taskList']['name'],
        'abracadabra')
    self.assertEqual(history[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        history[-1]['workflowExecutionCompletedEventAttributes']['result']), 3)
def test_one_activity_options_overrides_priority(self):
    """activity_options(task_priority=...) overrides the priority recorded
    in the ActivityTaskScheduled event."""
    class OneActivityWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            with activity_options(task_priority=66):
                arg_sum = yield BunchOfActivities.sum(arg1, arg2)
            return_(arg_sum)

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        OneActivityWorkflow)
    activity_runner = ActivityWorker(
        self.session, self.region, self.domain, self.task_list,
        BunchOfActivities())

    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = OneActivityWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution

    decider.run_once()
    activity_runner.run_once()
    decider.run_once()

    time.sleep(1)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 11)
    # SWF reports taskPriority as a string.
    self.assertEqual(
        history[4]['activityTaskScheduledEventAttributes']['taskPriority'], '66')
def test_try_except_finally_activity(self):
    # Verifies try/except/finally semantics around a failing activity: the
    # ValueError raised by BunchOfActivities.throw() must surface in the
    # workflow as ActivityTaskFailedError carrying the original cause, and
    # the finally-block activity must still run after the failure.
    class TryExceptFinallyWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            @coroutine
            def do_try_except():
                arg_sum = 0
                try:
                    arg_sum += yield BunchOfActivities.sum(arg1, arg2)
                    yield BunchOfActivities.throw()
                except ActivityTaskFailedError as err:
                    if isinstance(err.cause, ValueError) \
                            and str(err.cause) == 'Hello-Error':
                        # Sanity-check the failure metadata: throw() is
                        # activity id '2' and its failure is history event 13.
                        if err.event_id != 13 or err.activity_id != '2':
                            raise RuntimeError("Test Failed")
                        arg_sum += yield BunchOfActivities.sum(arg1, arg2)
                finally:
                    arg_sum += yield BunchOfActivities.sum(arg1, arg2)
                return_(arg_sum)
            result = yield do_try_except()
            return_(result)
    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, TryExceptFinallyWorkflow)
    act_worker = ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities())
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = TryExceptFinallyWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    # Four decision/activity rounds: try-sum, throw (fails), except-sum,
    # finally-sum; then one final decision completes the workflow.
    for i in range(4):
        wf_worker.run_once()
        act_worker.run_once()
    wf_worker.run_once()
    time.sleep(1)
    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 29)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    # 3 + 3 + 3 from the three successful sum(1, 2) calls.
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 9)
def test_try_except_with_timer(self):
    # Like test_try_except_finally_activity, but recovers from the failed
    # activity inside the except block and then sleeps on a workflow timer
    # before returning (no finally block here despite the class name).
    class TryExceptFinallyWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            @coroutine
            def do_try_except():
                arg_sum = 0
                try:
                    arg_sum += yield BunchOfActivities.sum(arg1, arg2)
                    yield BunchOfActivities.throw()
                except ActivityTaskFailedError as err:
                    if isinstance(err.cause, ValueError) \
                            and str(err.cause) == 'Hello-Error':
                        # Sanity-check the failure metadata: throw() is
                        # activity id '2' and its failure is history event 13.
                        if err.event_id != 13 or err.activity_id != '2':
                            raise RuntimeError("Test Failed")
                        arg_sum += yield BunchOfActivities.sum(arg1, arg2)
                yield workflow_time.sleep(1)
                return_(arg_sum)
            result = yield do_try_except()
            return_(result)
    wf_worker = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list, TryExceptFinallyWorkflow)
    act_worker = ActivityWorker(
        self.session, self.region, self.domain, self.task_list, BunchOfActivities())
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = TryExceptFinallyWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution
    # Three decision/activity rounds: try-sum, throw (fails), except-sum.
    for i in range(3):
        wf_worker.run_once()
        act_worker.run_once()
    # Once for the timer
    wf_worker.run_once()
    # Once for the completion
    wf_worker.run_once()
    time.sleep(1)
    hist = self.get_workflow_execution_history()
    self.assertEqual(len(hist), 28)
    self.assertEqual(hist[-1]['eventType'], 'WorkflowExecutionCompleted')
    # 3 + 3 from the two successful sum(1, 2) calls.
    self.assertEqual(self.serializer.loads(
        hist[-1]['workflowExecutionCompletedEventAttributes']['result']), 6)
def test_two_activities(self):
    """Run two activities sequentially and return both results as a tuple."""
    class BunchOfActivitiesWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            arg_sum = yield BunchOfActivities.sum(arg1, arg2)
            arg_mul = yield BunchOfActivities.mul(arg1, arg2)
            return_((arg_sum, arg_mul))

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        BunchOfActivitiesWorkflow)
    activity_runner = ActivityWorker(
        self.session, self.region, self.domain, self.task_list,
        BunchOfActivities())

    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = BunchOfActivitiesWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution

    # One decision/activity round per activity, then the closing decision.
    for _ in range(2):
        decider.run_once()
        activity_runner.run_once()
    decider.run_once()

    time.sleep(1)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 17)
    self.assertEqual(history[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        history[-1]['workflowExecutionCompletedEventAttributes']['result']), (3, 2))
def test_next_page_token_activities(self):
    """Generate well over a hundred history events so the history spans
    multiple pages, then fetch the second page via nextPageToken."""
    class NextPageTokenWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, repeat, arg1):
            for i in range(repeat):
                yield BunchOfActivities.sum(i, arg1)
            return_(repeat)

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        NextPageTokenWorkflow)
    activity_runner = ActivityWorker(
        self.session, self.region, self.domain, self.task_list,
        BunchOfActivities())

    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = NextPageTokenWorkflow.execute(repeat=21, arg1=1)
        self.workflow_execution = instance.workflow_execution

    for _ in range(21):
        decider.run_once()
        activity_runner.run_once()
    decider.run_once()  # finish off

    time.sleep(1)
    # First page plus the page behind the returned token.
    events, token = self.get_workflow_execution_history_with_token()
    events.extend(self.get_workflow_execution_history(next_page_token=token))
    self.assertEqual(len(events), 131)
    self.assertEqual(events[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        events[-1]['workflowExecutionCompletedEventAttributes']['result']), 21)
def test_all_future_activities(self):
    """Yield a tuple of futures to wait for all of them at once."""
    class AllFutureWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            sum_future = BunchOfActivities.sum(arg1, arg2)
            mul_future = BunchOfActivities.mul(arg1, arg2)
            result = yield sum_future, mul_future
            return_(result)

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        AllFutureWorkflow)
    activity_runner = ActivityWorker(
        self.session, self.region, self.domain, self.task_list,
        BunchOfActivities())

    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = AllFutureWorkflow.execute(arg1=1, arg2=2)
        self.workflow_execution = instance.workflow_execution

    # Both activities are scheduled up front; each needs an activity run
    # and a follow-up decision.
    for _ in range(2):
        decider.run_once()
        activity_runner.run_once()
    decider.run_once()

    time.sleep(1)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 17)
    self.assertEqual(history[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        history[-1]['workflowExecutionCompletedEventAttributes']['result']), (3, 2))
def test_any_future_activities(self):
    """Yield future1 | future2 to resume as soon as either activity is done."""
    class SleepingActivities(object):
        @activity(version='1.2',
                  schedule_to_start_timeout=60,
                  start_to_close_timeout=60)
        def sleep(self, time_to_sleep):
            time.sleep(time_to_sleep)
            return time_to_sleep

    class AnyFutureWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1, arg2):
            sleep1_future = SleepingActivities.sleep(arg1)
            sleep2_future = SleepingActivities.sleep(arg2)
            result = yield sleep1_future | sleep2_future
            return_(result)

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        AnyFutureWorkflow)
    activity_runner = ActivityWorker(
        self.session, self.region, self.domain, self.task_list,
        SleepingActivities())

    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        # One slow (5s) and one fast (1s) activity.
        instance = AnyFutureWorkflow.execute(arg1=5, arg2=1)
        self.workflow_execution = instance.workflow_execution

    decider.run_once()
    activity_runner.run_once()
    activity_runner.run_once()
    decider.run_once()

    time.sleep(1)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 14)
    self.assertEqual(history[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertTrue(self.serializer.loads(
        history[-1]['workflowExecutionCompletedEventAttributes']['result']))
def test_workflow_continue_as_new(self):
    """Calling self.execute() inside a running workflow continues it as a
    new execution; the continued run then completes normally."""
    class NoActivitiesWorkflow(WorkflowDefinition):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def execute(self, arg1):
            if arg1 > 0:
                arg1 -= 1
                self.execute(arg1)
            else:
                return "TestExecution"

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        NoActivitiesWorkflow)

    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = NoActivitiesWorkflow.execute(arg1=1)
        self.workflow_execution = instance.workflow_execution

    # One decision for the original run, one for the continued run.
    for _ in range(2):
        decider.run_once()

    time.sleep(1)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 5)
    self.assertEqual(history[-1]['eventType'], 'WorkflowExecutionContinuedAsNew')

    # Follow the new run id and check the terminal result.
    attrs = history[-1]['workflowExecutionContinuedAsNewEventAttributes']
    history = self.get_workflow_execution_history(
        run_id=attrs['newExecutionRunId'])
    self.assertEqual(len(history), 5)
    self.assertEqual(history[-1]['eventType'], 'WorkflowExecutionCompleted')
    self.assertEqual(self.serializer.loads(
        history[-1]['workflowExecutionCompletedEventAttributes']['result']),
        'TestExecution')
def test_subclassed_workflow(self):
    """A subclass that overrides the parent's @execute entry point still
    starts and completes cleanly."""
    class SuperClassWorkflow(WorkflowDefinition):
        @execute(version='1.0', execution_start_to_close_timeout=60)
        def execute(self):
            pass

    class SubClassWorkflow(SuperClassWorkflow):
        @execute(version='1.0', execution_start_to_close_timeout=60)
        def execute(self):
            pass

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        SubClassWorkflow)
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = SubClassWorkflow.execute()
        self.workflow_execution = instance.workflow_execution

    decider.run_once()
    time.sleep(2)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 5)
def test_subclassed_workflow_no_exec(self):
    """A subclass that inherits (rather than overrides) the parent's
    @execute entry point still starts and completes cleanly."""
    class SuperClassWorkflow(WorkflowDefinition):
        @execute(version='1.0', execution_start_to_close_timeout=60)
        def execute(self):
            pass

    class SubClassWorkflow(SuperClassWorkflow):
        pass

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        SubClassWorkflow)
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = SubClassWorkflow.execute()
        self.workflow_execution = instance.workflow_execution

    decider.run_once()
    time.sleep(2)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 5)
def test_subclassed_workflow_multiver(self):
    """A subclass can expose multiple versions of one workflow name:
    an overridden v1.1 entry point and an additional v1.2 entry point."""
    class MultiverWorkflow(WorkflowDefinition):
        @execute(version='1.0', execution_start_to_close_timeout=60)
        def start_wf(self):
            pass

    @workflow(name='MultiverWorkflow')
    class SubMultiverWorkflow(MultiverWorkflow):
        @execute(version='1.1', execution_start_to_close_timeout=60)
        def start_wf(self):
            pass

        @execute(version='1.2', execution_start_to_close_timeout=60)
        def start_wf_v2(self):
            pass

    decider = WorkflowWorker(
        self.session, self.region, self.domain, self.task_list,
        SubMultiverWorkflow)

    # Start via the overridden v1.1 entry point.
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = SubMultiverWorkflow.start_wf()
        self.workflow_execution = instance.workflow_execution
    decider.run_once()
    time.sleep(2)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 5)

    # Start via the additional v1.2 entry point and verify the workflow
    # type recorded in the start event.
    with workflow_starter(self.session, self.region, self.domain, self.task_list):
        instance = SubMultiverWorkflow.start_wf_v2()
        self.workflow_execution = instance.workflow_execution
    decider.run_once()
    time.sleep(2)
    history = self.get_workflow_execution_history()
    self.assertEqual(len(history), 5)
    self.assertEqual(
        {'name': 'MultiverWorkflow', 'version': '1.2'},
        history[0]['workflowExecutionStartedEventAttributes']['workflowType'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 42.029046
| 106
| 0.64363
| 3,083
| 30,387
| 6.129744
| 0.076873
| 0.066568
| 0.045243
| 0.06334
| 0.806805
| 0.802836
| 0.785586
| 0.772939
| 0.76733
| 0.762197
| 0
| 0.017871
| 0.257906
| 30,387
| 722
| 107
| 42.087258
| 0.820177
| 0.010893
| 0
| 0.718861
| 0
| 0
| 0.071757
| 0.044365
| 0
| 0
| 0
| 0
| 0.115658
| 1
| 0.092527
| false
| 0.012456
| 0.010676
| 0.001779
| 0.156584
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2c2804e059200fb8cc3d76d3e7dbefaa2a7278fc
| 839
|
py
|
Python
|
tests/test_loads.py
|
eng-tools/sfsimodels
|
4771f7693c7ed30c05e82e41401c7d141e02dcf9
|
[
"MIT"
] | 4
|
2017-12-16T10:17:13.000Z
|
2020-10-13T05:04:19.000Z
|
tests/test_loads.py
|
eng-tools/sfsimodels
|
4771f7693c7ed30c05e82e41401c7d141e02dcf9
|
[
"MIT"
] | 1
|
2021-05-19T05:33:43.000Z
|
2021-05-19T05:33:43.000Z
|
tests/test_loads.py
|
eng-tools/sfsimodels
|
4771f7693c7ed30c05e82e41401c7d141e02dcf9
|
[
"MIT"
] | 2
|
2020-11-07T04:46:55.000Z
|
2021-07-29T07:07:44.000Z
|
from sfsimodels.models import loads
def test_loads_add_and_remove():
    """Load components start unset (None), accept assignment, and can be
    re-assigned to new values."""
    load = loads.Load(p_x=10)
    assert load.p_x == 10
    assert load.p_y is None

    # Re-assign one component and set another for the first time.
    load.p_x = 15
    load.p_y = 9
    assert load.p_x == 15
    assert load.p_y == 9

    # Set the remaining force/torque components, then read them back.
    expected = (('p_z', 3), ('t_xx', 4), ('t_yy', 5), ('t_zz', 6))
    for name, value in expected:
        setattr(load, name, value)
    for name, value in expected:
        assert getattr(load, name) == value
def test_load_at_coords_add_and_remove():
    """LoadAtCoords behaves like Load for its components and also keeps
    the coordinate passed at construction."""
    load = loads.LoadAtCoords(p_x=10, x=3)
    assert load.p_x == 10
    assert load.p_y is None

    # Re-assign one component and set another for the first time.
    load.p_x = 15
    load.p_y = 9
    assert load.p_x == 15
    assert load.p_y == 9

    # Set the remaining force/torque components, then read them back.
    expected = (('p_z', 3), ('t_xx', 4), ('t_yy', 5), ('t_zz', 6))
    for name, value in expected:
        setattr(load, name, value)
    for name, value in expected:
        assert getattr(load, name) == value

    # The coordinate supplied at construction is preserved.
    assert load.x == 3
| 21.512821
| 42
| 0.598331
| 167
| 839
| 2.766467
| 0.191617
| 0.183983
| 0.238095
| 0.103896
| 0.822511
| 0.731602
| 0.709957
| 0.692641
| 0.692641
| 0.692641
| 0
| 0.064189
| 0.294398
| 839
| 38
| 43
| 22.078947
| 0.716216
| 0
| 0
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.058824
| false
| 0
| 0.029412
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.